summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorneal <neal>2007-11-16 13:35:00 +0000
committerneal <neal>2007-11-16 13:35:00 +0000
commite46ff816c662bc8b47dfc00bbe5501dbeffd93bb (patch)
tree16603b813cf7301b85b58f28217aa188825513d8
parent17b21c229fe9756a2e9ec158b6bdf5c2ca9869a5 (diff)
2007-11-16 Neal H. Walfield <neal@gnu.org>
* viengoos/Makefile.am: New file based on ../wortel/Makefile.am. * viengoos/headers.m4: New file. * viengoos/config.m4: New file based on ../wortel/config.m4. * viengoos/viengoos.h: New file. * viengoos/viengoos.c: New file. * viengoos/activity.h: Likewise. * viengoos/activity.c: Likewise. * viengoos/as.h: Likewise. * viengoos/as.c: Likewise. * viengoos/cap-lookup.c: Likewise. * viengoos/cap.h: Likewise. * viengoos/cap.c: Likewise. * viengoos/thread.h: New file. * viengoos/thread.c: New file. * viengoos/object.h: New file. * viengoos/object.c: New file. * viengoos/rm.h: New file. * viengoos/server.c: New file. * viengoos/server.h: New file. * viengoos/zalloc.h: Copied from ../physmem. * viengoos/zalloc.c: Copied from ../physmem. Don't include "output.h". Include <hurd/stddef.h>. Change uses of min_page_size to PAGESIZE. * viengoos/memory.h: New file. * viengoos/memory.c: New file. * viengoos/sigma0.c: Copy from ../wortel. * viengoos/sigma0.h: Copy from ../wortel. Don't include "shutdown.h". Include <hurd/stddef.h>. * viengoos/bits.h: Likewise. * viengoos/panic.c: New file. * viengoos/debug.c: Likewise. * viengoos/debug.h: Likewise. * viengoos/boot-modules.h: Likewise. * viengoos/boot-modules.c: Likewise. * viengoos/elf.h: Copied from ../wortel. * viengoos/loader.c: New file based on ../wortel/loader.c. * viengoos/loader.h: New file. * viengoos/multiboot.h: Copied from Grub. * viengoos/mmap.c: New file based on ../physmem/mmap.c. * viengoos/malloc-wrap.c: New file based on ../physmem/malloc-wrap.c. * viengoos/malloc.c: Version 2.8.3 of Doug Lea's malloc.c. * viengoos/malloc.h: Version 2.8.3 of Doug Lea's malloc.h. * viengoos/ia32-cmain.c: New file based on ../wortel/ia32-cmain.c. * viengoos/ia32-crt0.S: Copied from ../wortel. (STACK_SIZE): Use a 16 page stack. * viengoos/ia32-output.c: Copied from ../wortel. * viengoos/ia32-shutdown.c: Likewise. * viengoos/output.h: New file based on ../wortel/output.h. Include <stdarg.h>. (cprintf): New definition. 
(output_debug): Don't define. (debug): Don't define. * viengoos/output.c: New file based on ../wortel/output.c. Don't include <stdlib.h>. (vprintf): New function. (printf): Implement in terms of vprintf. * viengoos/output-none.c: Copied from ../wortel. * viengoos/output-serial.c: Likewise. * viengoos/output-stdio.c: New file. * viengoos/output-vga.c: Copied from ../wortel. * viengoos/shutdown.h: New file based on ../wortel/shutdown.h. Don't include "output.h". (panic): Don't define. (shutdown): Rename from this... (shutdown_machine): ... to this. * viengoos/shutdown.c: New file based on ../wortel/shutdown.c. (reset) [_L4_TEST_ENVIRONMENT]: Call abort. (halt) [_L4_TEST_ENVIRONMENT]: Call abort. (shutdown): Rename from this... (shutdown_machine): ... to this. * viengoos/t-environment.h: New file based on ../libl4/tests/environment.h. Protect from multiple inclusion. Include <hurd/stddef.h>. Include <string.h>. Include <l4/stubs.h>. (program_name): New declaration. (check_nr): Don't assume that val1 and val2 are _L4_word_t, use typeof instead. (main): Call output_init. * viengoos/t-as.c: New file.
-rw-r--r--viengoos/ChangeLog95
-rw-r--r--viengoos/Makefile.am84
-rw-r--r--viengoos/Makefile.in1338
-rw-r--r--viengoos/activity.c137
-rw-r--r--viengoos/activity.h91
-rw-r--r--viengoos/as.c550
-rw-r--r--viengoos/as.h52
-rw-r--r--viengoos/bits.h87
-rw-r--r--viengoos/boot-modules.c24
-rw-r--r--viengoos/boot-modules.h39
-rw-r--r--viengoos/cap-lookup.c324
-rw-r--r--viengoos/cap.c166
-rw-r--r--viengoos/cap.h44
-rw-r--r--viengoos/config.m421
-rw-r--r--viengoos/debug.c23
-rw-r--r--viengoos/debug.h35
-rw-r--r--viengoos/elf.h2438
-rw-r--r--viengoos/headers.m413
-rw-r--r--viengoos/ia32-cmain.c146
-rw-r--r--viengoos/ia32-crt0.S47
-rw-r--r--viengoos/ia32-output.c39
-rw-r--r--viengoos/ia32-shutdown.c52
-rw-r--r--viengoos/loader.c164
-rw-r--r--viengoos/loader.h39
-rw-r--r--viengoos/malloc-wrap.c65
-rw-r--r--viengoos/malloc.c5067
-rw-r--r--viengoos/malloc.h529
-rw-r--r--viengoos/memory.c358
-rw-r--r--viengoos/memory.h76
-rw-r--r--viengoos/mmap.c57
-rw-r--r--viengoos/multiboot.h121
-rw-r--r--viengoos/object.c443
-rw-r--r--viengoos/object.h270
-rw-r--r--viengoos/output-none.c33
-rw-r--r--viengoos/output-serial.c160
-rw-r--r--viengoos/output-stdio.c51
-rw-r--r--viengoos/output-vga.c147
-rw-r--r--viengoos/output.c283
-rw-r--r--viengoos/output.h75
-rw-r--r--viengoos/panic.c42
-rw-r--r--viengoos/rm.h437
-rw-r--r--viengoos/server.c515
-rw-r--r--viengoos/server.h27
-rw-r--r--viengoos/shutdown.c83
-rw-r--r--viengoos/shutdown.h42
-rw-r--r--viengoos/sigma0.c148
-rw-r--r--viengoos/sigma0.h42
-rw-r--r--viengoos/t-as.c322
-rw-r--r--viengoos/t-environment.h284
-rw-r--r--viengoos/thread.c261
-rw-r--r--viengoos/thread.h110
-rw-r--r--viengoos/viengoos.c449
-rw-r--r--viengoos/viengoos.h40
-rw-r--r--viengoos/zalloc.c285
-rw-r--r--viengoos/zalloc.h40
55 files changed, 16910 insertions, 0 deletions
diff --git a/viengoos/ChangeLog b/viengoos/ChangeLog
new file mode 100644
index 0000000..7303d54
--- /dev/null
+++ b/viengoos/ChangeLog
@@ -0,0 +1,95 @@
+2007-11-16 Neal H. Walfield <neal@gnu.org>
+
+ * viengoos/Makefile.am: New file based on ../wortel/Makefile.am.
+ * viengoos/headers.m4: New file.
+ * viengoos/config.m4: New file based on ../wortel/config.m4.
+
+ * viengoos/viengoos.h: New file.
+ * viengoos/viengoos.c: New file.
+ * viengoos/activity.h: Likewise.
+ * viengoos/activity.c: Likewise.
+ * viengoos/as.h: Likewise.
+ * viengoos/as.c: Likewise.
+ * viengoos/cap-lookup.c: Likewise.
+ * viengoos/cap.h: Likewise.
+ * viengoos/cap.c: Likewise.
+ * viengoos/thread.h: New file.
+ * viengoos/thread.c: New file.
+ * viengoos/object.h: New file.
+ * viengoos/object.c: New file.
+ * viengoos/rm.h: New file.
+ * viengoos/server.c: New file.
+ * viengoos/server.h: New file.
+
+ * viengoos/zalloc.h: Copied from ../physmem.
+ * viengoos/zalloc.c: Copied from ../physmem.
+ Don't include "output.h".
+ Include <hurd/stddef.h>.
+ Change uses of min_page_size to PAGESIZE.
+ * viengoos/memory.h: New file.
+ * viengoos/memory.c: New file.
+ * viengoos/sigma0.c: Copy from ../wortel.
+ * viengoos/sigma0.h: Copy from ../wortel.
+ Don't include "shutdown.h".
+ Include <hurd/stddef.h>.
+
+ * viengoos/bits.h: Likewise.
+
+ * viengoos/panic.c: New file.
+ * viengoos/debug.c: Likewise.
+ * viengoos/debug.h: Likewise.
+
+ * viengoos/boot-modules.h: Likewise.
+ * viengoos/boot-modules.c: Likewise.
+ * viengoos/elf.h: Copied from ../wortel.
+ * viengoos/loader.c: New file based on ../wortel/loader.c.
+ * viengoos/loader.h: New file.
+ * viengoos/multiboot.h: Copied from Grub.
+
+ * viengoos/mmap.c: New file based on ../physmem/mmap.c.
+ * viengoos/malloc-wrap.c: New file based on ../physmem/malloc-wrap.c.
+ * viengoos/malloc.c: Version 2.8.3 of Doug Lea's malloc.c.
+ * viengoos/malloc.h: Version 2.8.3 of Doug Lea's malloc.h.
+
+ * viengoos/ia32-cmain.c: New file based on ../wortel/ia32-cmain.c.
+ * viengoos/ia32-crt0.S: Copied from ../wortel.
+ (STACK_SIZE): Use a 16 page stack.
+ * viengoos/ia32-output.c: Copied from ../wortel.
+ * viengoos/ia32-shutdown.c: Likewise.
+
+ * viengoos/output.h: New file based on ../wortel/output.h.
+ Include <stdarg.h>.
+ (cprintf): New definition.
+ (output_debug): Don't define.
+ (debug): Don't define.
+ * viengoos/output.c: New file based on ../wortel/output.c.
+ Don't include <stdlib.h>.
+ (vprintf): New function.
+ (printf): Implement in terms of vprintf.
+ * viengoos/output-none.c: Copied from ../wortel.
+ * viengoos/output-serial.c: Likewise.
+ * viengoos/output-stdio.c: New file.
+ * viengoos/output-vga.c: Copied from ../wortel.
+
+ * viengoos/shutdown.h: New file based on ../wortel/shutdown.h.
+ Don't include "output.h".
+ (panic): Don't define.
+ (shutdown): Rename from this...
+ (shutdown_machine): ... to this.
+ * viengoos/shutdown.c: New file based on ../wortel/shutdown.c.
+ (reset) [_L4_TEST_ENVIRONMENT]: Call abort.
+ (halt) [_L4_TEST_ENVIRONMENT]: Call abort.
+ (shutdown): Rename from this...
+ (shutdown_machine): ... to this.
+
+ * viengoos/t-environment.h: New file based on
+ ../libl4/tests/environment.h.
+ Protect from multiple inclusion.
+ Include <hurd/stddef.h>.
+ Include <string.h>.
+ Include <l4/stubs.h>.
+ (program_name): New declaration.
+ (check_nr): Don't assume that val1 and val2 are _L4_word_t, use
+ typeof instead.
+ (main): Call output_init.
+ * viengoos/t-as.c: New file.
diff --git a/viengoos/Makefile.am b/viengoos/Makefile.am
new file mode 100644
index 0000000..1b31c27
--- /dev/null
+++ b/viengoos/Makefile.am
@@ -0,0 +1,84 @@
+# Makefile.am - Makefile template for viengoos.
+# Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+# Written by Marcus Brinkmann.
+#
+# This file is part of the GNU Hurd.
+#
+# The GNU Hurd is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This GNU Hurd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+if ARCH_IA32
+ ARCH_SOURCES = multiboot.h ia32-crt0.S ia32-cmain.c \
+ ia32-output.c output-vga.c output-serial.c ia32-shutdown.c
+endif
+
+bootdir = $(prefix)/boot
+boot_PROGRAMS = viengoos
+
+COMMON_CPPFLAGS = -I$(srcdir) -I$(top_builddir)/include \
+ -I$(top_srcdir)/libc-parts -D_GNU_SOURCE -Wall -std=gnu99 -g
+
+viengoos_CPPFLAGS = $(COMMON_CPPFLAGS) -DRM_INTERN
+viengoos_SOURCES = $(ARCH_SOURCES) \
+ output.h output.c output-none.c \
+ debug.h debug.c \
+ shutdown.h shutdown.c \
+ panic.c \
+ sigma0.h sigma0.c \
+ malloc.h malloc-wrap.c \
+ zalloc.h zalloc.c \
+ mmap.c \
+ rm.h \
+ viengoos.h viengoos.c \
+ boot-modules.h boot-modules.c \
+ memory.h memory.c \
+ object.h object.c \
+ cap.h cap.c cap-lookup.c \
+ activity.h activity.c \
+ thread.h thread.c \
+ as.h as.c \
+ bits.h \
+ elf.h loader.h loader.c \
+ server.h server.c
+
+# Doug Lea's malloc is included by malloc-wrap.c.
+EXTRA_viengoos_SOURCES = malloc.c
+
+viengoos_LDADD = -lgcc ../libc-parts/libc-parts.a \
+ ../libhurd-btree/libhurd-btree.a \
+ ../libhurd-ihash/libhurd-ihash.a
+viengoos_LDFLAGS = -u_start -e_start -N -nostdlib \
+ -Ttext=@HURD_RM_LOAD_ADDRESS@
+
+TESTS = t-as
+check_PROGRAMS = $(TESTS)
+
+t_as_CPPFLAGS = $(viengoos_CPPFLAGS) -include $(srcdir)/t-environment.h
+t_as_SOURCES = t-as.c \
+ zalloc.h zalloc.c \
+ memory.h memory.c \
+ cap.h cap.c cap-lookup.c \
+ object.h object.c \
+ activity.h activity.c \
+ thread.h thread.c \
+ as.h as.c \
+ output.h output.c output-stdio.c \
+ shutdown.h shutdown.c \
+ panic.h panic.c \
+ debug.h debug.c
+t_as_LDADD = $(viengoos_LDADD)
+
+lib_LIBRARIES = libhurd-cap.a
+libhurd_cap_a_CPPFLAGS = $(COMMON_CPPFLAGS)
+libhurd_cap_a_SOURCES = cap-lookup.c as.c as.h
+
diff --git a/viengoos/Makefile.in b/viengoos/Makefile.in
new file mode 100644
index 0000000..55b6bcd
--- /dev/null
+++ b/viengoos/Makefile.in
@@ -0,0 +1,1338 @@
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile.am - Makefile template for viengoos.
+# Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+# Written by Marcus Brinkmann.
+#
+# This file is part of the GNU Hurd.
+#
+# The GNU Hurd is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This GNU Hurd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+top_builddir = ..
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+INSTALL = @INSTALL@
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+boot_PROGRAMS = viengoos$(EXEEXT)
+check_PROGRAMS = $(am__EXEEXT_1)
+subdir = viengoos
+DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(srcdir)/rm.h
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
+ $(top_srcdir)/laden/config.m4 $(top_srcdir)/viengoos/config.m4 \
+ $(top_srcdir)/libl4/headers.m4 \
+ $(top_srcdir)/platform/headers.m4 \
+ $(top_srcdir)/hurd/headers.m4 \
+ $(top_srcdir)/libhurd-ihash/headers.m4 \
+ $(top_srcdir)/libhurd-btree/headers.m4 \
+ $(top_srcdir)/libbitarray/headers.m4 \
+ $(top_srcdir)/libhurd-slab/headers.m4 \
+ $(top_srcdir)/libpthread/headers.m4 \
+ $(top_srcdir)/libhurd-mm/headers.m4 \
+ $(top_srcdir)/viengoos/headers.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bootdir)"
+libLIBRARIES_INSTALL = $(INSTALL_DATA)
+LIBRARIES = $(lib_LIBRARIES)
+ARFLAGS = cru
+libhurd_cap_a_AR = $(AR) $(ARFLAGS)
+libhurd_cap_a_LIBADD =
+am_libhurd_cap_a_OBJECTS = libhurd_cap_a-cap-lookup.$(OBJEXT) \
+ libhurd_cap_a-as.$(OBJEXT)
+libhurd_cap_a_OBJECTS = $(am_libhurd_cap_a_OBJECTS)
+bootPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
+am__EXEEXT_1 = t-as$(EXEEXT)
+PROGRAMS = $(boot_PROGRAMS)
+am_t_as_OBJECTS = t_as-t-as.$(OBJEXT) t_as-zalloc.$(OBJEXT) \
+ t_as-memory.$(OBJEXT) t_as-cap.$(OBJEXT) \
+ t_as-cap-lookup.$(OBJEXT) t_as-object.$(OBJEXT) \
+ t_as-activity.$(OBJEXT) t_as-thread.$(OBJEXT) \
+ t_as-as.$(OBJEXT) t_as-output.$(OBJEXT) \
+ t_as-output-stdio.$(OBJEXT) t_as-shutdown.$(OBJEXT) \
+ t_as-panic.$(OBJEXT) t_as-debug.$(OBJEXT)
+t_as_OBJECTS = $(am_t_as_OBJECTS)
+am__DEPENDENCIES_1 = ../libc-parts/libc-parts.a \
+ ../libhurd-btree/libhurd-btree.a \
+ ../libhurd-ihash/libhurd-ihash.a
+t_as_DEPENDENCIES = $(am__DEPENDENCIES_1)
+am__viengoos_SOURCES_DIST = multiboot.h ia32-crt0.S ia32-cmain.c \
+ ia32-output.c output-vga.c output-serial.c ia32-shutdown.c \
+ output.h output.c output-none.c debug.h debug.c shutdown.h \
+ shutdown.c panic.c sigma0.h sigma0.c malloc.h malloc-wrap.c \
+ zalloc.h zalloc.c mmap.c rm.h viengoos.h viengoos.c \
+ boot-modules.h boot-modules.c memory.h memory.c object.h \
+ object.c cap.h cap.c cap-lookup.c activity.h activity.c \
+ thread.h thread.c as.h as.c bits.h elf.h loader.h loader.c \
+ server.h server.c
+@ARCH_IA32_TRUE@am__objects_1 = ia32-crt0.$(OBJEXT) \
+@ARCH_IA32_TRUE@ viengoos-ia32-cmain.$(OBJEXT) \
+@ARCH_IA32_TRUE@ viengoos-ia32-output.$(OBJEXT) \
+@ARCH_IA32_TRUE@ viengoos-output-vga.$(OBJEXT) \
+@ARCH_IA32_TRUE@ viengoos-output-serial.$(OBJEXT) \
+@ARCH_IA32_TRUE@ viengoos-ia32-shutdown.$(OBJEXT)
+am_viengoos_OBJECTS = $(am__objects_1) viengoos-output.$(OBJEXT) \
+ viengoos-output-none.$(OBJEXT) viengoos-debug.$(OBJEXT) \
+ viengoos-shutdown.$(OBJEXT) viengoos-panic.$(OBJEXT) \
+ viengoos-sigma0.$(OBJEXT) viengoos-malloc-wrap.$(OBJEXT) \
+ viengoos-zalloc.$(OBJEXT) viengoos-mmap.$(OBJEXT) \
+ viengoos-viengoos.$(OBJEXT) viengoos-boot-modules.$(OBJEXT) \
+ viengoos-memory.$(OBJEXT) viengoos-object.$(OBJEXT) \
+ viengoos-cap.$(OBJEXT) viengoos-cap-lookup.$(OBJEXT) \
+ viengoos-activity.$(OBJEXT) viengoos-thread.$(OBJEXT) \
+ viengoos-as.$(OBJEXT) viengoos-loader.$(OBJEXT) \
+ viengoos-server.$(OBJEXT)
+viengoos_OBJECTS = $(am_viengoos_OBJECTS)
+viengoos_DEPENDENCIES = ../libc-parts/libc-parts.a \
+ ../libhurd-btree/libhurd-btree.a \
+ ../libhurd-ihash/libhurd-ihash.a
+DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+CCASCOMPILE = $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS)
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+SOURCES = $(libhurd_cap_a_SOURCES) $(t_as_SOURCES) $(viengoos_SOURCES) \
+ $(EXTRA_viengoos_SOURCES)
+DIST_SOURCES = $(libhurd_cap_a_SOURCES) $(t_as_SOURCES) \
+ $(am__viengoos_SOURCES_DIST) $(EXTRA_viengoos_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMDEP_FALSE = @AMDEP_FALSE@
+AMDEP_TRUE = @AMDEP_TRUE@
+AMTAR = @AMTAR@
+AR = @AR@
+ARCH_ALPHA_FALSE = @ARCH_ALPHA_FALSE@
+ARCH_ALPHA_TRUE = @ARCH_ALPHA_TRUE@
+ARCH_AMD64_FALSE = @ARCH_AMD64_FALSE@
+ARCH_AMD64_TRUE = @ARCH_AMD64_TRUE@
+ARCH_IA32_FALSE = @ARCH_IA32_FALSE@
+ARCH_IA32_TRUE = @ARCH_IA32_TRUE@
+ARCH_IA64_FALSE = @ARCH_IA64_FALSE@
+ARCH_IA64_TRUE = @ARCH_IA64_TRUE@
+ARCH_POWERPC64_FALSE = @ARCH_POWERPC64_FALSE@
+ARCH_POWERPC64_TRUE = @ARCH_POWERPC64_TRUE@
+ARCH_POWERPC_FALSE = @ARCH_POWERPC_FALSE@
+ARCH_POWERPC_TRUE = @ARCH_POWERPC_TRUE@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DVIPS = @DVIPS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EXEEXT = @EXEEXT@
+FIG2DEV = @FIG2DEV@
+HURD_LADEN_LOAD_ADDRESS = @HURD_LADEN_LOAD_ADDRESS@
+HURD_RM_LOAD_ADDRESS = @HURD_RM_LOAD_ADDRESS@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+L4_ABI_V2_FALSE = @L4_ABI_V2_FALSE@
+L4_ABI_V2_TRUE = @L4_ABI_V2_TRUE@
+L4_ABI_X2_FALSE = @L4_ABI_X2_FALSE@
+L4_ABI_X2_TRUE = @L4_ABI_X2_TRUE@
+LATEX = @LATEX@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
+MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
+MAKEINFO = @MAKEINFO@
+NM = @NM@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PS2PDF = @PS2PDF@
+RANLIB = @RANLIB@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SORT = @SORT@
+STATIC_GLIBC = @STATIC_GLIBC@
+STRIP = @STRIP@
+VERSION = @VERSION@
+WITH_LIBC_FALSE = @WITH_LIBC_FALSE@
+WITH_LIBC_TRUE = @WITH_LIBC_TRUE@
+ac_ct_CC = @ac_ct_CC@
+am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
+am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+@ARCH_IA32_TRUE@ARCH_SOURCES = multiboot.h ia32-crt0.S ia32-cmain.c \
+@ARCH_IA32_TRUE@ ia32-output.c output-vga.c output-serial.c ia32-shutdown.c
+
+bootdir = $(prefix)/boot
+COMMON_CPPFLAGS = -I$(srcdir) -I$(top_builddir)/include \
+ -I$(top_srcdir)/libc-parts -D_GNU_SOURCE -Wall -std=gnu99 -g
+
+viengoos_CPPFLAGS = $(COMMON_CPPFLAGS) -DRM_INTERN
+viengoos_SOURCES = $(ARCH_SOURCES) \
+ output.h output.c output-none.c \
+ debug.h debug.c \
+ shutdown.h shutdown.c \
+ panic.c \
+ sigma0.h sigma0.c \
+ malloc.h malloc-wrap.c \
+ zalloc.h zalloc.c \
+ mmap.c \
+ rm.h \
+ viengoos.h viengoos.c \
+ boot-modules.h boot-modules.c \
+ memory.h memory.c \
+ object.h object.c \
+ cap.h cap.c cap-lookup.c \
+ activity.h activity.c \
+ thread.h thread.c \
+ as.h as.c \
+ bits.h \
+ elf.h loader.h loader.c \
+ server.h server.c
+
+
+# Doug Lea's malloc is included by malloc-wrap.c.
+EXTRA_viengoos_SOURCES = malloc.c
+viengoos_LDADD = -lgcc ../libc-parts/libc-parts.a \
+ ../libhurd-btree/libhurd-btree.a \
+ ../libhurd-ihash/libhurd-ihash.a
+
+viengoos_LDFLAGS = -u_start -e_start -N -nostdlib \
+ -Ttext=@HURD_RM_LOAD_ADDRESS@
+
+TESTS = t-as
+t_as_CPPFLAGS = $(viengoos_CPPFLAGS) -include $(srcdir)/t-environment.h
+t_as_SOURCES = t-as.c \
+ zalloc.h zalloc.c \
+ memory.h memory.c \
+ cap.h cap.c cap-lookup.c \
+ object.h object.c \
+ activity.h activity.c \
+ thread.h thread.c \
+ as.h as.c \
+ output.h output.c output-stdio.c \
+ shutdown.h shutdown.c \
+ panic.h panic.c \
+ debug.h debug.c
+
+t_as_LDADD = $(viengoos_LDADD)
+lib_LIBRARIES = libhurd-cap.a
+libhurd_cap_a_CPPFLAGS = $(COMMON_CPPFLAGS)
+libhurd_cap_a_SOURCES = cap-lookup.c as.c as.h
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .S .c .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu viengoos/Makefile'; \
+ cd $(top_srcdir) && \
+ $(AUTOMAKE) --gnu viengoos/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-libLIBRARIES: $(lib_LIBRARIES)
+ @$(NORMAL_INSTALL)
+ test -z "$(libdir)" || $(mkdir_p) "$(DESTDIR)$(libdir)"
+ @list='$(lib_LIBRARIES)'; for p in $$list; do \
+ if test -f $$p; then \
+ f=$(am__strip_dir) \
+ echo " $(libLIBRARIES_INSTALL) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \
+ $(libLIBRARIES_INSTALL) "$$p" "$(DESTDIR)$(libdir)/$$f"; \
+ else :; fi; \
+ done
+ @$(POST_INSTALL)
+ @list='$(lib_LIBRARIES)'; for p in $$list; do \
+ if test -f $$p; then \
+ p=$(am__strip_dir) \
+ echo " $(RANLIB) '$(DESTDIR)$(libdir)/$$p'"; \
+ $(RANLIB) "$(DESTDIR)$(libdir)/$$p"; \
+ else :; fi; \
+ done
+
+uninstall-libLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ @list='$(lib_LIBRARIES)'; for p in $$list; do \
+ p=$(am__strip_dir) \
+ echo " rm -f '$(DESTDIR)$(libdir)/$$p'"; \
+ rm -f "$(DESTDIR)$(libdir)/$$p"; \
+ done
+
+clean-libLIBRARIES:
+ -test -z "$(lib_LIBRARIES)" || rm -f $(lib_LIBRARIES)
+libhurd-cap.a: $(libhurd_cap_a_OBJECTS) $(libhurd_cap_a_DEPENDENCIES)
+ -rm -f libhurd-cap.a
+ $(libhurd_cap_a_AR) libhurd-cap.a $(libhurd_cap_a_OBJECTS) $(libhurd_cap_a_LIBADD)
+ $(RANLIB) libhurd-cap.a
+install-bootPROGRAMS: $(boot_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ test -z "$(bootdir)" || $(mkdir_p) "$(DESTDIR)$(bootdir)"
+ @list='$(boot_PROGRAMS)'; for p in $$list; do \
+ p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+ if test -f $$p \
+ ; then \
+ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \
+ echo " $(INSTALL_PROGRAM_ENV) $(bootPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bootdir)/$$f'"; \
+ $(INSTALL_PROGRAM_ENV) $(bootPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bootdir)/$$f" || exit 1; \
+ else :; fi; \
+ done
+
+uninstall-bootPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(boot_PROGRAMS)'; for p in $$list; do \
+ f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \
+ echo " rm -f '$(DESTDIR)$(bootdir)/$$f'"; \
+ rm -f "$(DESTDIR)$(bootdir)/$$f"; \
+ done
+
+clean-bootPROGRAMS:
+ -test -z "$(boot_PROGRAMS)" || rm -f $(boot_PROGRAMS)
+
+clean-checkPROGRAMS:
+ -test -z "$(check_PROGRAMS)" || rm -f $(check_PROGRAMS)
+t-as$(EXEEXT): $(t_as_OBJECTS) $(t_as_DEPENDENCIES)
+ @rm -f t-as$(EXEEXT)
+ $(LINK) $(t_as_LDFLAGS) $(t_as_OBJECTS) $(t_as_LDADD) $(LIBS)
+viengoos$(EXEEXT): $(viengoos_OBJECTS) $(viengoos_DEPENDENCIES)
+ @rm -f viengoos$(EXEEXT)
+ $(LINK) $(viengoos_LDFLAGS) $(viengoos_OBJECTS) $(viengoos_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhurd_cap_a-as.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhurd_cap_a-cap-lookup.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-activity.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-as.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-cap-lookup.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-cap.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-debug.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-memory.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-object.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-output-stdio.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-output.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-panic.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-shutdown.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-t-as.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-thread.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_as-zalloc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-activity.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-as.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-boot-modules.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-cap-lookup.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-cap.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-debug.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-ia32-cmain.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-ia32-output.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-ia32-shutdown.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-loader.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-malloc-wrap.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-malloc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-memory.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-mmap.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-object.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-output-none.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-output-serial.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-output-vga.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-output.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-panic.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-server.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-shutdown.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-sigma0.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-thread.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-viengoos.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/viengoos-zalloc.Po@am__quote@
+
+.S.o:
+ $(CCASCOMPILE) -c $<
+
+.S.obj:
+ $(CCASCOMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.o:
+@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+libhurd_cap_a-cap-lookup.o: cap-lookup.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT libhurd_cap_a-cap-lookup.o -MD -MP -MF "$(DEPDIR)/libhurd_cap_a-cap-lookup.Tpo" -c -o libhurd_cap_a-cap-lookup.o `test -f 'cap-lookup.c' || echo '$(srcdir)/'`cap-lookup.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/libhurd_cap_a-cap-lookup.Tpo" "$(DEPDIR)/libhurd_cap_a-cap-lookup.Po"; else rm -f "$(DEPDIR)/libhurd_cap_a-cap-lookup.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap-lookup.c' object='libhurd_cap_a-cap-lookup.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o libhurd_cap_a-cap-lookup.o `test -f 'cap-lookup.c' || echo '$(srcdir)/'`cap-lookup.c
+
+libhurd_cap_a-cap-lookup.obj: cap-lookup.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT libhurd_cap_a-cap-lookup.obj -MD -MP -MF "$(DEPDIR)/libhurd_cap_a-cap-lookup.Tpo" -c -o libhurd_cap_a-cap-lookup.obj `if test -f 'cap-lookup.c'; then $(CYGPATH_W) 'cap-lookup.c'; else $(CYGPATH_W) '$(srcdir)/cap-lookup.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/libhurd_cap_a-cap-lookup.Tpo" "$(DEPDIR)/libhurd_cap_a-cap-lookup.Po"; else rm -f "$(DEPDIR)/libhurd_cap_a-cap-lookup.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap-lookup.c' object='libhurd_cap_a-cap-lookup.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o libhurd_cap_a-cap-lookup.obj `if test -f 'cap-lookup.c'; then $(CYGPATH_W) 'cap-lookup.c'; else $(CYGPATH_W) '$(srcdir)/cap-lookup.c'; fi`
+
+libhurd_cap_a-as.o: as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT libhurd_cap_a-as.o -MD -MP -MF "$(DEPDIR)/libhurd_cap_a-as.Tpo" -c -o libhurd_cap_a-as.o `test -f 'as.c' || echo '$(srcdir)/'`as.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/libhurd_cap_a-as.Tpo" "$(DEPDIR)/libhurd_cap_a-as.Po"; else rm -f "$(DEPDIR)/libhurd_cap_a-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='as.c' object='libhurd_cap_a-as.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o libhurd_cap_a-as.o `test -f 'as.c' || echo '$(srcdir)/'`as.c
+
+libhurd_cap_a-as.obj: as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT libhurd_cap_a-as.obj -MD -MP -MF "$(DEPDIR)/libhurd_cap_a-as.Tpo" -c -o libhurd_cap_a-as.obj `if test -f 'as.c'; then $(CYGPATH_W) 'as.c'; else $(CYGPATH_W) '$(srcdir)/as.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/libhurd_cap_a-as.Tpo" "$(DEPDIR)/libhurd_cap_a-as.Po"; else rm -f "$(DEPDIR)/libhurd_cap_a-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='as.c' object='libhurd_cap_a-as.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhurd_cap_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o libhurd_cap_a-as.obj `if test -f 'as.c'; then $(CYGPATH_W) 'as.c'; else $(CYGPATH_W) '$(srcdir)/as.c'; fi`
+
+t_as-t-as.o: t-as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-t-as.o -MD -MP -MF "$(DEPDIR)/t_as-t-as.Tpo" -c -o t_as-t-as.o `test -f 't-as.c' || echo '$(srcdir)/'`t-as.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-t-as.Tpo" "$(DEPDIR)/t_as-t-as.Po"; else rm -f "$(DEPDIR)/t_as-t-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='t-as.c' object='t_as-t-as.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-t-as.o `test -f 't-as.c' || echo '$(srcdir)/'`t-as.c
+
+t_as-t-as.obj: t-as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-t-as.obj -MD -MP -MF "$(DEPDIR)/t_as-t-as.Tpo" -c -o t_as-t-as.obj `if test -f 't-as.c'; then $(CYGPATH_W) 't-as.c'; else $(CYGPATH_W) '$(srcdir)/t-as.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-t-as.Tpo" "$(DEPDIR)/t_as-t-as.Po"; else rm -f "$(DEPDIR)/t_as-t-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='t-as.c' object='t_as-t-as.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-t-as.obj `if test -f 't-as.c'; then $(CYGPATH_W) 't-as.c'; else $(CYGPATH_W) '$(srcdir)/t-as.c'; fi`
+
+t_as-zalloc.o: zalloc.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-zalloc.o -MD -MP -MF "$(DEPDIR)/t_as-zalloc.Tpo" -c -o t_as-zalloc.o `test -f 'zalloc.c' || echo '$(srcdir)/'`zalloc.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-zalloc.Tpo" "$(DEPDIR)/t_as-zalloc.Po"; else rm -f "$(DEPDIR)/t_as-zalloc.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='zalloc.c' object='t_as-zalloc.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-zalloc.o `test -f 'zalloc.c' || echo '$(srcdir)/'`zalloc.c
+
+t_as-zalloc.obj: zalloc.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-zalloc.obj -MD -MP -MF "$(DEPDIR)/t_as-zalloc.Tpo" -c -o t_as-zalloc.obj `if test -f 'zalloc.c'; then $(CYGPATH_W) 'zalloc.c'; else $(CYGPATH_W) '$(srcdir)/zalloc.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-zalloc.Tpo" "$(DEPDIR)/t_as-zalloc.Po"; else rm -f "$(DEPDIR)/t_as-zalloc.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='zalloc.c' object='t_as-zalloc.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-zalloc.obj `if test -f 'zalloc.c'; then $(CYGPATH_W) 'zalloc.c'; else $(CYGPATH_W) '$(srcdir)/zalloc.c'; fi`
+
+t_as-memory.o: memory.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-memory.o -MD -MP -MF "$(DEPDIR)/t_as-memory.Tpo" -c -o t_as-memory.o `test -f 'memory.c' || echo '$(srcdir)/'`memory.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-memory.Tpo" "$(DEPDIR)/t_as-memory.Po"; else rm -f "$(DEPDIR)/t_as-memory.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='memory.c' object='t_as-memory.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-memory.o `test -f 'memory.c' || echo '$(srcdir)/'`memory.c
+
+t_as-memory.obj: memory.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-memory.obj -MD -MP -MF "$(DEPDIR)/t_as-memory.Tpo" -c -o t_as-memory.obj `if test -f 'memory.c'; then $(CYGPATH_W) 'memory.c'; else $(CYGPATH_W) '$(srcdir)/memory.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-memory.Tpo" "$(DEPDIR)/t_as-memory.Po"; else rm -f "$(DEPDIR)/t_as-memory.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='memory.c' object='t_as-memory.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-memory.obj `if test -f 'memory.c'; then $(CYGPATH_W) 'memory.c'; else $(CYGPATH_W) '$(srcdir)/memory.c'; fi`
+
+t_as-cap.o: cap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-cap.o -MD -MP -MF "$(DEPDIR)/t_as-cap.Tpo" -c -o t_as-cap.o `test -f 'cap.c' || echo '$(srcdir)/'`cap.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-cap.Tpo" "$(DEPDIR)/t_as-cap.Po"; else rm -f "$(DEPDIR)/t_as-cap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap.c' object='t_as-cap.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-cap.o `test -f 'cap.c' || echo '$(srcdir)/'`cap.c
+
+t_as-cap.obj: cap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-cap.obj -MD -MP -MF "$(DEPDIR)/t_as-cap.Tpo" -c -o t_as-cap.obj `if test -f 'cap.c'; then $(CYGPATH_W) 'cap.c'; else $(CYGPATH_W) '$(srcdir)/cap.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-cap.Tpo" "$(DEPDIR)/t_as-cap.Po"; else rm -f "$(DEPDIR)/t_as-cap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap.c' object='t_as-cap.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-cap.obj `if test -f 'cap.c'; then $(CYGPATH_W) 'cap.c'; else $(CYGPATH_W) '$(srcdir)/cap.c'; fi`
+
+t_as-cap-lookup.o: cap-lookup.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-cap-lookup.o -MD -MP -MF "$(DEPDIR)/t_as-cap-lookup.Tpo" -c -o t_as-cap-lookup.o `test -f 'cap-lookup.c' || echo '$(srcdir)/'`cap-lookup.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-cap-lookup.Tpo" "$(DEPDIR)/t_as-cap-lookup.Po"; else rm -f "$(DEPDIR)/t_as-cap-lookup.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap-lookup.c' object='t_as-cap-lookup.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-cap-lookup.o `test -f 'cap-lookup.c' || echo '$(srcdir)/'`cap-lookup.c
+
+t_as-cap-lookup.obj: cap-lookup.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-cap-lookup.obj -MD -MP -MF "$(DEPDIR)/t_as-cap-lookup.Tpo" -c -o t_as-cap-lookup.obj `if test -f 'cap-lookup.c'; then $(CYGPATH_W) 'cap-lookup.c'; else $(CYGPATH_W) '$(srcdir)/cap-lookup.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-cap-lookup.Tpo" "$(DEPDIR)/t_as-cap-lookup.Po"; else rm -f "$(DEPDIR)/t_as-cap-lookup.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap-lookup.c' object='t_as-cap-lookup.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-cap-lookup.obj `if test -f 'cap-lookup.c'; then $(CYGPATH_W) 'cap-lookup.c'; else $(CYGPATH_W) '$(srcdir)/cap-lookup.c'; fi`
+
+t_as-object.o: object.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-object.o -MD -MP -MF "$(DEPDIR)/t_as-object.Tpo" -c -o t_as-object.o `test -f 'object.c' || echo '$(srcdir)/'`object.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-object.Tpo" "$(DEPDIR)/t_as-object.Po"; else rm -f "$(DEPDIR)/t_as-object.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='object.c' object='t_as-object.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-object.o `test -f 'object.c' || echo '$(srcdir)/'`object.c
+
+t_as-object.obj: object.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-object.obj -MD -MP -MF "$(DEPDIR)/t_as-object.Tpo" -c -o t_as-object.obj `if test -f 'object.c'; then $(CYGPATH_W) 'object.c'; else $(CYGPATH_W) '$(srcdir)/object.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-object.Tpo" "$(DEPDIR)/t_as-object.Po"; else rm -f "$(DEPDIR)/t_as-object.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='object.c' object='t_as-object.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-object.obj `if test -f 'object.c'; then $(CYGPATH_W) 'object.c'; else $(CYGPATH_W) '$(srcdir)/object.c'; fi`
+
+t_as-activity.o: activity.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-activity.o -MD -MP -MF "$(DEPDIR)/t_as-activity.Tpo" -c -o t_as-activity.o `test -f 'activity.c' || echo '$(srcdir)/'`activity.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-activity.Tpo" "$(DEPDIR)/t_as-activity.Po"; else rm -f "$(DEPDIR)/t_as-activity.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='activity.c' object='t_as-activity.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-activity.o `test -f 'activity.c' || echo '$(srcdir)/'`activity.c
+
+t_as-activity.obj: activity.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-activity.obj -MD -MP -MF "$(DEPDIR)/t_as-activity.Tpo" -c -o t_as-activity.obj `if test -f 'activity.c'; then $(CYGPATH_W) 'activity.c'; else $(CYGPATH_W) '$(srcdir)/activity.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-activity.Tpo" "$(DEPDIR)/t_as-activity.Po"; else rm -f "$(DEPDIR)/t_as-activity.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='activity.c' object='t_as-activity.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-activity.obj `if test -f 'activity.c'; then $(CYGPATH_W) 'activity.c'; else $(CYGPATH_W) '$(srcdir)/activity.c'; fi`
+
+t_as-thread.o: thread.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-thread.o -MD -MP -MF "$(DEPDIR)/t_as-thread.Tpo" -c -o t_as-thread.o `test -f 'thread.c' || echo '$(srcdir)/'`thread.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-thread.Tpo" "$(DEPDIR)/t_as-thread.Po"; else rm -f "$(DEPDIR)/t_as-thread.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='thread.c' object='t_as-thread.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-thread.o `test -f 'thread.c' || echo '$(srcdir)/'`thread.c
+
+t_as-thread.obj: thread.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-thread.obj -MD -MP -MF "$(DEPDIR)/t_as-thread.Tpo" -c -o t_as-thread.obj `if test -f 'thread.c'; then $(CYGPATH_W) 'thread.c'; else $(CYGPATH_W) '$(srcdir)/thread.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-thread.Tpo" "$(DEPDIR)/t_as-thread.Po"; else rm -f "$(DEPDIR)/t_as-thread.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='thread.c' object='t_as-thread.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-thread.obj `if test -f 'thread.c'; then $(CYGPATH_W) 'thread.c'; else $(CYGPATH_W) '$(srcdir)/thread.c'; fi`
+
+t_as-as.o: as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-as.o -MD -MP -MF "$(DEPDIR)/t_as-as.Tpo" -c -o t_as-as.o `test -f 'as.c' || echo '$(srcdir)/'`as.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-as.Tpo" "$(DEPDIR)/t_as-as.Po"; else rm -f "$(DEPDIR)/t_as-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='as.c' object='t_as-as.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-as.o `test -f 'as.c' || echo '$(srcdir)/'`as.c
+
+t_as-as.obj: as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-as.obj -MD -MP -MF "$(DEPDIR)/t_as-as.Tpo" -c -o t_as-as.obj `if test -f 'as.c'; then $(CYGPATH_W) 'as.c'; else $(CYGPATH_W) '$(srcdir)/as.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-as.Tpo" "$(DEPDIR)/t_as-as.Po"; else rm -f "$(DEPDIR)/t_as-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='as.c' object='t_as-as.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-as.obj `if test -f 'as.c'; then $(CYGPATH_W) 'as.c'; else $(CYGPATH_W) '$(srcdir)/as.c'; fi`
+
+t_as-output.o: output.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-output.o -MD -MP -MF "$(DEPDIR)/t_as-output.Tpo" -c -o t_as-output.o `test -f 'output.c' || echo '$(srcdir)/'`output.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-output.Tpo" "$(DEPDIR)/t_as-output.Po"; else rm -f "$(DEPDIR)/t_as-output.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output.c' object='t_as-output.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-output.o `test -f 'output.c' || echo '$(srcdir)/'`output.c
+
+t_as-output.obj: output.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-output.obj -MD -MP -MF "$(DEPDIR)/t_as-output.Tpo" -c -o t_as-output.obj `if test -f 'output.c'; then $(CYGPATH_W) 'output.c'; else $(CYGPATH_W) '$(srcdir)/output.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-output.Tpo" "$(DEPDIR)/t_as-output.Po"; else rm -f "$(DEPDIR)/t_as-output.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output.c' object='t_as-output.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-output.obj `if test -f 'output.c'; then $(CYGPATH_W) 'output.c'; else $(CYGPATH_W) '$(srcdir)/output.c'; fi`
+
+t_as-output-stdio.o: output-stdio.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-output-stdio.o -MD -MP -MF "$(DEPDIR)/t_as-output-stdio.Tpo" -c -o t_as-output-stdio.o `test -f 'output-stdio.c' || echo '$(srcdir)/'`output-stdio.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-output-stdio.Tpo" "$(DEPDIR)/t_as-output-stdio.Po"; else rm -f "$(DEPDIR)/t_as-output-stdio.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-stdio.c' object='t_as-output-stdio.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-output-stdio.o `test -f 'output-stdio.c' || echo '$(srcdir)/'`output-stdio.c
+
+t_as-output-stdio.obj: output-stdio.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-output-stdio.obj -MD -MP -MF "$(DEPDIR)/t_as-output-stdio.Tpo" -c -o t_as-output-stdio.obj `if test -f 'output-stdio.c'; then $(CYGPATH_W) 'output-stdio.c'; else $(CYGPATH_W) '$(srcdir)/output-stdio.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-output-stdio.Tpo" "$(DEPDIR)/t_as-output-stdio.Po"; else rm -f "$(DEPDIR)/t_as-output-stdio.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-stdio.c' object='t_as-output-stdio.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-output-stdio.obj `if test -f 'output-stdio.c'; then $(CYGPATH_W) 'output-stdio.c'; else $(CYGPATH_W) '$(srcdir)/output-stdio.c'; fi`
+
+t_as-shutdown.o: shutdown.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-shutdown.o -MD -MP -MF "$(DEPDIR)/t_as-shutdown.Tpo" -c -o t_as-shutdown.o `test -f 'shutdown.c' || echo '$(srcdir)/'`shutdown.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-shutdown.Tpo" "$(DEPDIR)/t_as-shutdown.Po"; else rm -f "$(DEPDIR)/t_as-shutdown.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='shutdown.c' object='t_as-shutdown.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-shutdown.o `test -f 'shutdown.c' || echo '$(srcdir)/'`shutdown.c
+
+t_as-shutdown.obj: shutdown.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-shutdown.obj -MD -MP -MF "$(DEPDIR)/t_as-shutdown.Tpo" -c -o t_as-shutdown.obj `if test -f 'shutdown.c'; then $(CYGPATH_W) 'shutdown.c'; else $(CYGPATH_W) '$(srcdir)/shutdown.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-shutdown.Tpo" "$(DEPDIR)/t_as-shutdown.Po"; else rm -f "$(DEPDIR)/t_as-shutdown.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='shutdown.c' object='t_as-shutdown.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-shutdown.obj `if test -f 'shutdown.c'; then $(CYGPATH_W) 'shutdown.c'; else $(CYGPATH_W) '$(srcdir)/shutdown.c'; fi`
+
+t_as-panic.o: panic.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-panic.o -MD -MP -MF "$(DEPDIR)/t_as-panic.Tpo" -c -o t_as-panic.o `test -f 'panic.c' || echo '$(srcdir)/'`panic.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-panic.Tpo" "$(DEPDIR)/t_as-panic.Po"; else rm -f "$(DEPDIR)/t_as-panic.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='panic.c' object='t_as-panic.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-panic.o `test -f 'panic.c' || echo '$(srcdir)/'`panic.c
+
+t_as-panic.obj: panic.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-panic.obj -MD -MP -MF "$(DEPDIR)/t_as-panic.Tpo" -c -o t_as-panic.obj `if test -f 'panic.c'; then $(CYGPATH_W) 'panic.c'; else $(CYGPATH_W) '$(srcdir)/panic.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-panic.Tpo" "$(DEPDIR)/t_as-panic.Po"; else rm -f "$(DEPDIR)/t_as-panic.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='panic.c' object='t_as-panic.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-panic.obj `if test -f 'panic.c'; then $(CYGPATH_W) 'panic.c'; else $(CYGPATH_W) '$(srcdir)/panic.c'; fi`
+
+t_as-debug.o: debug.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-debug.o -MD -MP -MF "$(DEPDIR)/t_as-debug.Tpo" -c -o t_as-debug.o `test -f 'debug.c' || echo '$(srcdir)/'`debug.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-debug.Tpo" "$(DEPDIR)/t_as-debug.Po"; else rm -f "$(DEPDIR)/t_as-debug.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='debug.c' object='t_as-debug.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-debug.o `test -f 'debug.c' || echo '$(srcdir)/'`debug.c
+
+t_as-debug.obj: debug.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT t_as-debug.obj -MD -MP -MF "$(DEPDIR)/t_as-debug.Tpo" -c -o t_as-debug.obj `if test -f 'debug.c'; then $(CYGPATH_W) 'debug.c'; else $(CYGPATH_W) '$(srcdir)/debug.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/t_as-debug.Tpo" "$(DEPDIR)/t_as-debug.Po"; else rm -f "$(DEPDIR)/t_as-debug.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='debug.c' object='t_as-debug.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(t_as_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o t_as-debug.obj `if test -f 'debug.c'; then $(CYGPATH_W) 'debug.c'; else $(CYGPATH_W) '$(srcdir)/debug.c'; fi`
+
+viengoos-ia32-cmain.o: ia32-cmain.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-ia32-cmain.o -MD -MP -MF "$(DEPDIR)/viengoos-ia32-cmain.Tpo" -c -o viengoos-ia32-cmain.o `test -f 'ia32-cmain.c' || echo '$(srcdir)/'`ia32-cmain.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-ia32-cmain.Tpo" "$(DEPDIR)/viengoos-ia32-cmain.Po"; else rm -f "$(DEPDIR)/viengoos-ia32-cmain.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ia32-cmain.c' object='viengoos-ia32-cmain.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-ia32-cmain.o `test -f 'ia32-cmain.c' || echo '$(srcdir)/'`ia32-cmain.c
+
+viengoos-ia32-cmain.obj: ia32-cmain.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-ia32-cmain.obj -MD -MP -MF "$(DEPDIR)/viengoos-ia32-cmain.Tpo" -c -o viengoos-ia32-cmain.obj `if test -f 'ia32-cmain.c'; then $(CYGPATH_W) 'ia32-cmain.c'; else $(CYGPATH_W) '$(srcdir)/ia32-cmain.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-ia32-cmain.Tpo" "$(DEPDIR)/viengoos-ia32-cmain.Po"; else rm -f "$(DEPDIR)/viengoos-ia32-cmain.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ia32-cmain.c' object='viengoos-ia32-cmain.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-ia32-cmain.obj `if test -f 'ia32-cmain.c'; then $(CYGPATH_W) 'ia32-cmain.c'; else $(CYGPATH_W) '$(srcdir)/ia32-cmain.c'; fi`
+
+viengoos-ia32-output.o: ia32-output.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-ia32-output.o -MD -MP -MF "$(DEPDIR)/viengoos-ia32-output.Tpo" -c -o viengoos-ia32-output.o `test -f 'ia32-output.c' || echo '$(srcdir)/'`ia32-output.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-ia32-output.Tpo" "$(DEPDIR)/viengoos-ia32-output.Po"; else rm -f "$(DEPDIR)/viengoos-ia32-output.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ia32-output.c' object='viengoos-ia32-output.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-ia32-output.o `test -f 'ia32-output.c' || echo '$(srcdir)/'`ia32-output.c
+
+viengoos-ia32-output.obj: ia32-output.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-ia32-output.obj -MD -MP -MF "$(DEPDIR)/viengoos-ia32-output.Tpo" -c -o viengoos-ia32-output.obj `if test -f 'ia32-output.c'; then $(CYGPATH_W) 'ia32-output.c'; else $(CYGPATH_W) '$(srcdir)/ia32-output.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-ia32-output.Tpo" "$(DEPDIR)/viengoos-ia32-output.Po"; else rm -f "$(DEPDIR)/viengoos-ia32-output.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ia32-output.c' object='viengoos-ia32-output.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-ia32-output.obj `if test -f 'ia32-output.c'; then $(CYGPATH_W) 'ia32-output.c'; else $(CYGPATH_W) '$(srcdir)/ia32-output.c'; fi`
+
+viengoos-output-vga.o: output-vga.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output-vga.o -MD -MP -MF "$(DEPDIR)/viengoos-output-vga.Tpo" -c -o viengoos-output-vga.o `test -f 'output-vga.c' || echo '$(srcdir)/'`output-vga.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output-vga.Tpo" "$(DEPDIR)/viengoos-output-vga.Po"; else rm -f "$(DEPDIR)/viengoos-output-vga.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-vga.c' object='viengoos-output-vga.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output-vga.o `test -f 'output-vga.c' || echo '$(srcdir)/'`output-vga.c
+
+viengoos-output-vga.obj: output-vga.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output-vga.obj -MD -MP -MF "$(DEPDIR)/viengoos-output-vga.Tpo" -c -o viengoos-output-vga.obj `if test -f 'output-vga.c'; then $(CYGPATH_W) 'output-vga.c'; else $(CYGPATH_W) '$(srcdir)/output-vga.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output-vga.Tpo" "$(DEPDIR)/viengoos-output-vga.Po"; else rm -f "$(DEPDIR)/viengoos-output-vga.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-vga.c' object='viengoos-output-vga.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output-vga.obj `if test -f 'output-vga.c'; then $(CYGPATH_W) 'output-vga.c'; else $(CYGPATH_W) '$(srcdir)/output-vga.c'; fi`
+
+viengoos-output-serial.o: output-serial.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output-serial.o -MD -MP -MF "$(DEPDIR)/viengoos-output-serial.Tpo" -c -o viengoos-output-serial.o `test -f 'output-serial.c' || echo '$(srcdir)/'`output-serial.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output-serial.Tpo" "$(DEPDIR)/viengoos-output-serial.Po"; else rm -f "$(DEPDIR)/viengoos-output-serial.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-serial.c' object='viengoos-output-serial.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output-serial.o `test -f 'output-serial.c' || echo '$(srcdir)/'`output-serial.c
+
+viengoos-output-serial.obj: output-serial.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output-serial.obj -MD -MP -MF "$(DEPDIR)/viengoos-output-serial.Tpo" -c -o viengoos-output-serial.obj `if test -f 'output-serial.c'; then $(CYGPATH_W) 'output-serial.c'; else $(CYGPATH_W) '$(srcdir)/output-serial.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output-serial.Tpo" "$(DEPDIR)/viengoos-output-serial.Po"; else rm -f "$(DEPDIR)/viengoos-output-serial.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-serial.c' object='viengoos-output-serial.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output-serial.obj `if test -f 'output-serial.c'; then $(CYGPATH_W) 'output-serial.c'; else $(CYGPATH_W) '$(srcdir)/output-serial.c'; fi`
+
+viengoos-ia32-shutdown.o: ia32-shutdown.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-ia32-shutdown.o -MD -MP -MF "$(DEPDIR)/viengoos-ia32-shutdown.Tpo" -c -o viengoos-ia32-shutdown.o `test -f 'ia32-shutdown.c' || echo '$(srcdir)/'`ia32-shutdown.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-ia32-shutdown.Tpo" "$(DEPDIR)/viengoos-ia32-shutdown.Po"; else rm -f "$(DEPDIR)/viengoos-ia32-shutdown.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ia32-shutdown.c' object='viengoos-ia32-shutdown.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-ia32-shutdown.o `test -f 'ia32-shutdown.c' || echo '$(srcdir)/'`ia32-shutdown.c
+
+viengoos-ia32-shutdown.obj: ia32-shutdown.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-ia32-shutdown.obj -MD -MP -MF "$(DEPDIR)/viengoos-ia32-shutdown.Tpo" -c -o viengoos-ia32-shutdown.obj `if test -f 'ia32-shutdown.c'; then $(CYGPATH_W) 'ia32-shutdown.c'; else $(CYGPATH_W) '$(srcdir)/ia32-shutdown.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-ia32-shutdown.Tpo" "$(DEPDIR)/viengoos-ia32-shutdown.Po"; else rm -f "$(DEPDIR)/viengoos-ia32-shutdown.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ia32-shutdown.c' object='viengoos-ia32-shutdown.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-ia32-shutdown.obj `if test -f 'ia32-shutdown.c'; then $(CYGPATH_W) 'ia32-shutdown.c'; else $(CYGPATH_W) '$(srcdir)/ia32-shutdown.c'; fi`
+
+viengoos-output.o: output.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output.o -MD -MP -MF "$(DEPDIR)/viengoos-output.Tpo" -c -o viengoos-output.o `test -f 'output.c' || echo '$(srcdir)/'`output.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output.Tpo" "$(DEPDIR)/viengoos-output.Po"; else rm -f "$(DEPDIR)/viengoos-output.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output.c' object='viengoos-output.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output.o `test -f 'output.c' || echo '$(srcdir)/'`output.c
+
+viengoos-output.obj: output.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output.obj -MD -MP -MF "$(DEPDIR)/viengoos-output.Tpo" -c -o viengoos-output.obj `if test -f 'output.c'; then $(CYGPATH_W) 'output.c'; else $(CYGPATH_W) '$(srcdir)/output.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output.Tpo" "$(DEPDIR)/viengoos-output.Po"; else rm -f "$(DEPDIR)/viengoos-output.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output.c' object='viengoos-output.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output.obj `if test -f 'output.c'; then $(CYGPATH_W) 'output.c'; else $(CYGPATH_W) '$(srcdir)/output.c'; fi`
+
+viengoos-output-none.o: output-none.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output-none.o -MD -MP -MF "$(DEPDIR)/viengoos-output-none.Tpo" -c -o viengoos-output-none.o `test -f 'output-none.c' || echo '$(srcdir)/'`output-none.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output-none.Tpo" "$(DEPDIR)/viengoos-output-none.Po"; else rm -f "$(DEPDIR)/viengoos-output-none.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-none.c' object='viengoos-output-none.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output-none.o `test -f 'output-none.c' || echo '$(srcdir)/'`output-none.c
+
+viengoos-output-none.obj: output-none.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-output-none.obj -MD -MP -MF "$(DEPDIR)/viengoos-output-none.Tpo" -c -o viengoos-output-none.obj `if test -f 'output-none.c'; then $(CYGPATH_W) 'output-none.c'; else $(CYGPATH_W) '$(srcdir)/output-none.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-output-none.Tpo" "$(DEPDIR)/viengoos-output-none.Po"; else rm -f "$(DEPDIR)/viengoos-output-none.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='output-none.c' object='viengoos-output-none.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-output-none.obj `if test -f 'output-none.c'; then $(CYGPATH_W) 'output-none.c'; else $(CYGPATH_W) '$(srcdir)/output-none.c'; fi`
+
+viengoos-debug.o: debug.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-debug.o -MD -MP -MF "$(DEPDIR)/viengoos-debug.Tpo" -c -o viengoos-debug.o `test -f 'debug.c' || echo '$(srcdir)/'`debug.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-debug.Tpo" "$(DEPDIR)/viengoos-debug.Po"; else rm -f "$(DEPDIR)/viengoos-debug.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='debug.c' object='viengoos-debug.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-debug.o `test -f 'debug.c' || echo '$(srcdir)/'`debug.c
+
+viengoos-debug.obj: debug.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-debug.obj -MD -MP -MF "$(DEPDIR)/viengoos-debug.Tpo" -c -o viengoos-debug.obj `if test -f 'debug.c'; then $(CYGPATH_W) 'debug.c'; else $(CYGPATH_W) '$(srcdir)/debug.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-debug.Tpo" "$(DEPDIR)/viengoos-debug.Po"; else rm -f "$(DEPDIR)/viengoos-debug.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='debug.c' object='viengoos-debug.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-debug.obj `if test -f 'debug.c'; then $(CYGPATH_W) 'debug.c'; else $(CYGPATH_W) '$(srcdir)/debug.c'; fi`
+
+viengoos-shutdown.o: shutdown.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-shutdown.o -MD -MP -MF "$(DEPDIR)/viengoos-shutdown.Tpo" -c -o viengoos-shutdown.o `test -f 'shutdown.c' || echo '$(srcdir)/'`shutdown.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-shutdown.Tpo" "$(DEPDIR)/viengoos-shutdown.Po"; else rm -f "$(DEPDIR)/viengoos-shutdown.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='shutdown.c' object='viengoos-shutdown.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-shutdown.o `test -f 'shutdown.c' || echo '$(srcdir)/'`shutdown.c
+
+viengoos-shutdown.obj: shutdown.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-shutdown.obj -MD -MP -MF "$(DEPDIR)/viengoos-shutdown.Tpo" -c -o viengoos-shutdown.obj `if test -f 'shutdown.c'; then $(CYGPATH_W) 'shutdown.c'; else $(CYGPATH_W) '$(srcdir)/shutdown.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-shutdown.Tpo" "$(DEPDIR)/viengoos-shutdown.Po"; else rm -f "$(DEPDIR)/viengoos-shutdown.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='shutdown.c' object='viengoos-shutdown.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-shutdown.obj `if test -f 'shutdown.c'; then $(CYGPATH_W) 'shutdown.c'; else $(CYGPATH_W) '$(srcdir)/shutdown.c'; fi`
+
+viengoos-panic.o: panic.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-panic.o -MD -MP -MF "$(DEPDIR)/viengoos-panic.Tpo" -c -o viengoos-panic.o `test -f 'panic.c' || echo '$(srcdir)/'`panic.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-panic.Tpo" "$(DEPDIR)/viengoos-panic.Po"; else rm -f "$(DEPDIR)/viengoos-panic.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='panic.c' object='viengoos-panic.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-panic.o `test -f 'panic.c' || echo '$(srcdir)/'`panic.c
+
+viengoos-panic.obj: panic.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-panic.obj -MD -MP -MF "$(DEPDIR)/viengoos-panic.Tpo" -c -o viengoos-panic.obj `if test -f 'panic.c'; then $(CYGPATH_W) 'panic.c'; else $(CYGPATH_W) '$(srcdir)/panic.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-panic.Tpo" "$(DEPDIR)/viengoos-panic.Po"; else rm -f "$(DEPDIR)/viengoos-panic.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='panic.c' object='viengoos-panic.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-panic.obj `if test -f 'panic.c'; then $(CYGPATH_W) 'panic.c'; else $(CYGPATH_W) '$(srcdir)/panic.c'; fi`
+
+viengoos-sigma0.o: sigma0.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-sigma0.o -MD -MP -MF "$(DEPDIR)/viengoos-sigma0.Tpo" -c -o viengoos-sigma0.o `test -f 'sigma0.c' || echo '$(srcdir)/'`sigma0.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-sigma0.Tpo" "$(DEPDIR)/viengoos-sigma0.Po"; else rm -f "$(DEPDIR)/viengoos-sigma0.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sigma0.c' object='viengoos-sigma0.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-sigma0.o `test -f 'sigma0.c' || echo '$(srcdir)/'`sigma0.c
+
+viengoos-sigma0.obj: sigma0.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-sigma0.obj -MD -MP -MF "$(DEPDIR)/viengoos-sigma0.Tpo" -c -o viengoos-sigma0.obj `if test -f 'sigma0.c'; then $(CYGPATH_W) 'sigma0.c'; else $(CYGPATH_W) '$(srcdir)/sigma0.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-sigma0.Tpo" "$(DEPDIR)/viengoos-sigma0.Po"; else rm -f "$(DEPDIR)/viengoos-sigma0.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sigma0.c' object='viengoos-sigma0.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-sigma0.obj `if test -f 'sigma0.c'; then $(CYGPATH_W) 'sigma0.c'; else $(CYGPATH_W) '$(srcdir)/sigma0.c'; fi`
+
+viengoos-malloc-wrap.o: malloc-wrap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-malloc-wrap.o -MD -MP -MF "$(DEPDIR)/viengoos-malloc-wrap.Tpo" -c -o viengoos-malloc-wrap.o `test -f 'malloc-wrap.c' || echo '$(srcdir)/'`malloc-wrap.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-malloc-wrap.Tpo" "$(DEPDIR)/viengoos-malloc-wrap.Po"; else rm -f "$(DEPDIR)/viengoos-malloc-wrap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='malloc-wrap.c' object='viengoos-malloc-wrap.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-malloc-wrap.o `test -f 'malloc-wrap.c' || echo '$(srcdir)/'`malloc-wrap.c
+
+viengoos-malloc-wrap.obj: malloc-wrap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-malloc-wrap.obj -MD -MP -MF "$(DEPDIR)/viengoos-malloc-wrap.Tpo" -c -o viengoos-malloc-wrap.obj `if test -f 'malloc-wrap.c'; then $(CYGPATH_W) 'malloc-wrap.c'; else $(CYGPATH_W) '$(srcdir)/malloc-wrap.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-malloc-wrap.Tpo" "$(DEPDIR)/viengoos-malloc-wrap.Po"; else rm -f "$(DEPDIR)/viengoos-malloc-wrap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='malloc-wrap.c' object='viengoos-malloc-wrap.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-malloc-wrap.obj `if test -f 'malloc-wrap.c'; then $(CYGPATH_W) 'malloc-wrap.c'; else $(CYGPATH_W) '$(srcdir)/malloc-wrap.c'; fi`
+
+viengoos-zalloc.o: zalloc.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-zalloc.o -MD -MP -MF "$(DEPDIR)/viengoos-zalloc.Tpo" -c -o viengoos-zalloc.o `test -f 'zalloc.c' || echo '$(srcdir)/'`zalloc.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-zalloc.Tpo" "$(DEPDIR)/viengoos-zalloc.Po"; else rm -f "$(DEPDIR)/viengoos-zalloc.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='zalloc.c' object='viengoos-zalloc.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-zalloc.o `test -f 'zalloc.c' || echo '$(srcdir)/'`zalloc.c
+
+viengoos-zalloc.obj: zalloc.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-zalloc.obj -MD -MP -MF "$(DEPDIR)/viengoos-zalloc.Tpo" -c -o viengoos-zalloc.obj `if test -f 'zalloc.c'; then $(CYGPATH_W) 'zalloc.c'; else $(CYGPATH_W) '$(srcdir)/zalloc.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-zalloc.Tpo" "$(DEPDIR)/viengoos-zalloc.Po"; else rm -f "$(DEPDIR)/viengoos-zalloc.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='zalloc.c' object='viengoos-zalloc.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-zalloc.obj `if test -f 'zalloc.c'; then $(CYGPATH_W) 'zalloc.c'; else $(CYGPATH_W) '$(srcdir)/zalloc.c'; fi`
+
+viengoos-mmap.o: mmap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-mmap.o -MD -MP -MF "$(DEPDIR)/viengoos-mmap.Tpo" -c -o viengoos-mmap.o `test -f 'mmap.c' || echo '$(srcdir)/'`mmap.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-mmap.Tpo" "$(DEPDIR)/viengoos-mmap.Po"; else rm -f "$(DEPDIR)/viengoos-mmap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='mmap.c' object='viengoos-mmap.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-mmap.o `test -f 'mmap.c' || echo '$(srcdir)/'`mmap.c
+
+viengoos-mmap.obj: mmap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-mmap.obj -MD -MP -MF "$(DEPDIR)/viengoos-mmap.Tpo" -c -o viengoos-mmap.obj `if test -f 'mmap.c'; then $(CYGPATH_W) 'mmap.c'; else $(CYGPATH_W) '$(srcdir)/mmap.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-mmap.Tpo" "$(DEPDIR)/viengoos-mmap.Po"; else rm -f "$(DEPDIR)/viengoos-mmap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='mmap.c' object='viengoos-mmap.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-mmap.obj `if test -f 'mmap.c'; then $(CYGPATH_W) 'mmap.c'; else $(CYGPATH_W) '$(srcdir)/mmap.c'; fi`
+
+viengoos-viengoos.o: viengoos.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-viengoos.o -MD -MP -MF "$(DEPDIR)/viengoos-viengoos.Tpo" -c -o viengoos-viengoos.o `test -f 'viengoos.c' || echo '$(srcdir)/'`viengoos.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-viengoos.Tpo" "$(DEPDIR)/viengoos-viengoos.Po"; else rm -f "$(DEPDIR)/viengoos-viengoos.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='viengoos.c' object='viengoos-viengoos.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-viengoos.o `test -f 'viengoos.c' || echo '$(srcdir)/'`viengoos.c
+
+viengoos-viengoos.obj: viengoos.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-viengoos.obj -MD -MP -MF "$(DEPDIR)/viengoos-viengoos.Tpo" -c -o viengoos-viengoos.obj `if test -f 'viengoos.c'; then $(CYGPATH_W) 'viengoos.c'; else $(CYGPATH_W) '$(srcdir)/viengoos.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-viengoos.Tpo" "$(DEPDIR)/viengoos-viengoos.Po"; else rm -f "$(DEPDIR)/viengoos-viengoos.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='viengoos.c' object='viengoos-viengoos.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-viengoos.obj `if test -f 'viengoos.c'; then $(CYGPATH_W) 'viengoos.c'; else $(CYGPATH_W) '$(srcdir)/viengoos.c'; fi`
+
+viengoos-boot-modules.o: boot-modules.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-boot-modules.o -MD -MP -MF "$(DEPDIR)/viengoos-boot-modules.Tpo" -c -o viengoos-boot-modules.o `test -f 'boot-modules.c' || echo '$(srcdir)/'`boot-modules.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-boot-modules.Tpo" "$(DEPDIR)/viengoos-boot-modules.Po"; else rm -f "$(DEPDIR)/viengoos-boot-modules.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='boot-modules.c' object='viengoos-boot-modules.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-boot-modules.o `test -f 'boot-modules.c' || echo '$(srcdir)/'`boot-modules.c
+
+viengoos-boot-modules.obj: boot-modules.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-boot-modules.obj -MD -MP -MF "$(DEPDIR)/viengoos-boot-modules.Tpo" -c -o viengoos-boot-modules.obj `if test -f 'boot-modules.c'; then $(CYGPATH_W) 'boot-modules.c'; else $(CYGPATH_W) '$(srcdir)/boot-modules.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-boot-modules.Tpo" "$(DEPDIR)/viengoos-boot-modules.Po"; else rm -f "$(DEPDIR)/viengoos-boot-modules.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='boot-modules.c' object='viengoos-boot-modules.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-boot-modules.obj `if test -f 'boot-modules.c'; then $(CYGPATH_W) 'boot-modules.c'; else $(CYGPATH_W) '$(srcdir)/boot-modules.c'; fi`
+
+viengoos-memory.o: memory.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-memory.o -MD -MP -MF "$(DEPDIR)/viengoos-memory.Tpo" -c -o viengoos-memory.o `test -f 'memory.c' || echo '$(srcdir)/'`memory.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-memory.Tpo" "$(DEPDIR)/viengoos-memory.Po"; else rm -f "$(DEPDIR)/viengoos-memory.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='memory.c' object='viengoos-memory.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-memory.o `test -f 'memory.c' || echo '$(srcdir)/'`memory.c
+
+viengoos-memory.obj: memory.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-memory.obj -MD -MP -MF "$(DEPDIR)/viengoos-memory.Tpo" -c -o viengoos-memory.obj `if test -f 'memory.c'; then $(CYGPATH_W) 'memory.c'; else $(CYGPATH_W) '$(srcdir)/memory.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-memory.Tpo" "$(DEPDIR)/viengoos-memory.Po"; else rm -f "$(DEPDIR)/viengoos-memory.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='memory.c' object='viengoos-memory.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-memory.obj `if test -f 'memory.c'; then $(CYGPATH_W) 'memory.c'; else $(CYGPATH_W) '$(srcdir)/memory.c'; fi`
+
+viengoos-object.o: object.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-object.o -MD -MP -MF "$(DEPDIR)/viengoos-object.Tpo" -c -o viengoos-object.o `test -f 'object.c' || echo '$(srcdir)/'`object.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-object.Tpo" "$(DEPDIR)/viengoos-object.Po"; else rm -f "$(DEPDIR)/viengoos-object.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='object.c' object='viengoos-object.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-object.o `test -f 'object.c' || echo '$(srcdir)/'`object.c
+
+viengoos-object.obj: object.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-object.obj -MD -MP -MF "$(DEPDIR)/viengoos-object.Tpo" -c -o viengoos-object.obj `if test -f 'object.c'; then $(CYGPATH_W) 'object.c'; else $(CYGPATH_W) '$(srcdir)/object.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-object.Tpo" "$(DEPDIR)/viengoos-object.Po"; else rm -f "$(DEPDIR)/viengoos-object.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='object.c' object='viengoos-object.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-object.obj `if test -f 'object.c'; then $(CYGPATH_W) 'object.c'; else $(CYGPATH_W) '$(srcdir)/object.c'; fi`
+
+viengoos-cap.o: cap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-cap.o -MD -MP -MF "$(DEPDIR)/viengoos-cap.Tpo" -c -o viengoos-cap.o `test -f 'cap.c' || echo '$(srcdir)/'`cap.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-cap.Tpo" "$(DEPDIR)/viengoos-cap.Po"; else rm -f "$(DEPDIR)/viengoos-cap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap.c' object='viengoos-cap.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-cap.o `test -f 'cap.c' || echo '$(srcdir)/'`cap.c
+
+viengoos-cap.obj: cap.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-cap.obj -MD -MP -MF "$(DEPDIR)/viengoos-cap.Tpo" -c -o viengoos-cap.obj `if test -f 'cap.c'; then $(CYGPATH_W) 'cap.c'; else $(CYGPATH_W) '$(srcdir)/cap.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-cap.Tpo" "$(DEPDIR)/viengoos-cap.Po"; else rm -f "$(DEPDIR)/viengoos-cap.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap.c' object='viengoos-cap.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-cap.obj `if test -f 'cap.c'; then $(CYGPATH_W) 'cap.c'; else $(CYGPATH_W) '$(srcdir)/cap.c'; fi`
+
+viengoos-cap-lookup.o: cap-lookup.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-cap-lookup.o -MD -MP -MF "$(DEPDIR)/viengoos-cap-lookup.Tpo" -c -o viengoos-cap-lookup.o `test -f 'cap-lookup.c' || echo '$(srcdir)/'`cap-lookup.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-cap-lookup.Tpo" "$(DEPDIR)/viengoos-cap-lookup.Po"; else rm -f "$(DEPDIR)/viengoos-cap-lookup.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap-lookup.c' object='viengoos-cap-lookup.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-cap-lookup.o `test -f 'cap-lookup.c' || echo '$(srcdir)/'`cap-lookup.c
+
+viengoos-cap-lookup.obj: cap-lookup.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-cap-lookup.obj -MD -MP -MF "$(DEPDIR)/viengoos-cap-lookup.Tpo" -c -o viengoos-cap-lookup.obj `if test -f 'cap-lookup.c'; then $(CYGPATH_W) 'cap-lookup.c'; else $(CYGPATH_W) '$(srcdir)/cap-lookup.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-cap-lookup.Tpo" "$(DEPDIR)/viengoos-cap-lookup.Po"; else rm -f "$(DEPDIR)/viengoos-cap-lookup.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='cap-lookup.c' object='viengoos-cap-lookup.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-cap-lookup.obj `if test -f 'cap-lookup.c'; then $(CYGPATH_W) 'cap-lookup.c'; else $(CYGPATH_W) '$(srcdir)/cap-lookup.c'; fi`
+
+viengoos-activity.o: activity.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-activity.o -MD -MP -MF "$(DEPDIR)/viengoos-activity.Tpo" -c -o viengoos-activity.o `test -f 'activity.c' || echo '$(srcdir)/'`activity.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-activity.Tpo" "$(DEPDIR)/viengoos-activity.Po"; else rm -f "$(DEPDIR)/viengoos-activity.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='activity.c' object='viengoos-activity.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-activity.o `test -f 'activity.c' || echo '$(srcdir)/'`activity.c
+
+viengoos-activity.obj: activity.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-activity.obj -MD -MP -MF "$(DEPDIR)/viengoos-activity.Tpo" -c -o viengoos-activity.obj `if test -f 'activity.c'; then $(CYGPATH_W) 'activity.c'; else $(CYGPATH_W) '$(srcdir)/activity.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-activity.Tpo" "$(DEPDIR)/viengoos-activity.Po"; else rm -f "$(DEPDIR)/viengoos-activity.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='activity.c' object='viengoos-activity.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-activity.obj `if test -f 'activity.c'; then $(CYGPATH_W) 'activity.c'; else $(CYGPATH_W) '$(srcdir)/activity.c'; fi`
+
+viengoos-thread.o: thread.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-thread.o -MD -MP -MF "$(DEPDIR)/viengoos-thread.Tpo" -c -o viengoos-thread.o `test -f 'thread.c' || echo '$(srcdir)/'`thread.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-thread.Tpo" "$(DEPDIR)/viengoos-thread.Po"; else rm -f "$(DEPDIR)/viengoos-thread.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='thread.c' object='viengoos-thread.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-thread.o `test -f 'thread.c' || echo '$(srcdir)/'`thread.c
+
+viengoos-thread.obj: thread.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-thread.obj -MD -MP -MF "$(DEPDIR)/viengoos-thread.Tpo" -c -o viengoos-thread.obj `if test -f 'thread.c'; then $(CYGPATH_W) 'thread.c'; else $(CYGPATH_W) '$(srcdir)/thread.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-thread.Tpo" "$(DEPDIR)/viengoos-thread.Po"; else rm -f "$(DEPDIR)/viengoos-thread.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='thread.c' object='viengoos-thread.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-thread.obj `if test -f 'thread.c'; then $(CYGPATH_W) 'thread.c'; else $(CYGPATH_W) '$(srcdir)/thread.c'; fi`
+
+viengoos-as.o: as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-as.o -MD -MP -MF "$(DEPDIR)/viengoos-as.Tpo" -c -o viengoos-as.o `test -f 'as.c' || echo '$(srcdir)/'`as.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-as.Tpo" "$(DEPDIR)/viengoos-as.Po"; else rm -f "$(DEPDIR)/viengoos-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='as.c' object='viengoos-as.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-as.o `test -f 'as.c' || echo '$(srcdir)/'`as.c
+
+viengoos-as.obj: as.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-as.obj -MD -MP -MF "$(DEPDIR)/viengoos-as.Tpo" -c -o viengoos-as.obj `if test -f 'as.c'; then $(CYGPATH_W) 'as.c'; else $(CYGPATH_W) '$(srcdir)/as.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-as.Tpo" "$(DEPDIR)/viengoos-as.Po"; else rm -f "$(DEPDIR)/viengoos-as.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='as.c' object='viengoos-as.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-as.obj `if test -f 'as.c'; then $(CYGPATH_W) 'as.c'; else $(CYGPATH_W) '$(srcdir)/as.c'; fi`
+
+viengoos-loader.o: loader.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-loader.o -MD -MP -MF "$(DEPDIR)/viengoos-loader.Tpo" -c -o viengoos-loader.o `test -f 'loader.c' || echo '$(srcdir)/'`loader.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-loader.Tpo" "$(DEPDIR)/viengoos-loader.Po"; else rm -f "$(DEPDIR)/viengoos-loader.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='loader.c' object='viengoos-loader.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-loader.o `test -f 'loader.c' || echo '$(srcdir)/'`loader.c
+
+viengoos-loader.obj: loader.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-loader.obj -MD -MP -MF "$(DEPDIR)/viengoos-loader.Tpo" -c -o viengoos-loader.obj `if test -f 'loader.c'; then $(CYGPATH_W) 'loader.c'; else $(CYGPATH_W) '$(srcdir)/loader.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-loader.Tpo" "$(DEPDIR)/viengoos-loader.Po"; else rm -f "$(DEPDIR)/viengoos-loader.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='loader.c' object='viengoos-loader.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-loader.obj `if test -f 'loader.c'; then $(CYGPATH_W) 'loader.c'; else $(CYGPATH_W) '$(srcdir)/loader.c'; fi`
+
+viengoos-server.o: server.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-server.o -MD -MP -MF "$(DEPDIR)/viengoos-server.Tpo" -c -o viengoos-server.o `test -f 'server.c' || echo '$(srcdir)/'`server.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-server.Tpo" "$(DEPDIR)/viengoos-server.Po"; else rm -f "$(DEPDIR)/viengoos-server.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='server.c' object='viengoos-server.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-server.o `test -f 'server.c' || echo '$(srcdir)/'`server.c
+
+viengoos-server.obj: server.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-server.obj -MD -MP -MF "$(DEPDIR)/viengoos-server.Tpo" -c -o viengoos-server.obj `if test -f 'server.c'; then $(CYGPATH_W) 'server.c'; else $(CYGPATH_W) '$(srcdir)/server.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-server.Tpo" "$(DEPDIR)/viengoos-server.Po"; else rm -f "$(DEPDIR)/viengoos-server.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='server.c' object='viengoos-server.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-server.obj `if test -f 'server.c'; then $(CYGPATH_W) 'server.c'; else $(CYGPATH_W) '$(srcdir)/server.c'; fi`
+
+viengoos-malloc.o: malloc.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-malloc.o -MD -MP -MF "$(DEPDIR)/viengoos-malloc.Tpo" -c -o viengoos-malloc.o `test -f 'malloc.c' || echo '$(srcdir)/'`malloc.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-malloc.Tpo" "$(DEPDIR)/viengoos-malloc.Po"; else rm -f "$(DEPDIR)/viengoos-malloc.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='malloc.c' object='viengoos-malloc.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-malloc.o `test -f 'malloc.c' || echo '$(srcdir)/'`malloc.c
+
+viengoos-malloc.obj: malloc.c
+@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT viengoos-malloc.obj -MD -MP -MF "$(DEPDIR)/viengoos-malloc.Tpo" -c -o viengoos-malloc.obj `if test -f 'malloc.c'; then $(CYGPATH_W) 'malloc.c'; else $(CYGPATH_W) '$(srcdir)/malloc.c'; fi`; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/viengoos-malloc.Tpo" "$(DEPDIR)/viengoos-malloc.Po"; else rm -f "$(DEPDIR)/viengoos-malloc.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='malloc.c' object='viengoos-malloc.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(viengoos_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o viengoos-malloc.obj `if test -f 'malloc.c'; then $(CYGPATH_W) 'malloc.c'; else $(CYGPATH_W) '$(srcdir)/malloc.c'; fi`
+uninstall-info-am:
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$tags $$unique; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(CTAGS_ARGS)$$tags$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$tags $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && cd $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+check-TESTS: $(TESTS)
+ @failed=0; all=0; xfail=0; xpass=0; skip=0; \
+ srcdir=$(srcdir); export srcdir; \
+ list='$(TESTS)'; \
+ if test -n "$$list"; then \
+ for tst in $$list; do \
+ if test -f ./$$tst; then dir=./; \
+ elif test -f $$tst; then dir=; \
+ else dir="$(srcdir)/"; fi; \
+ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \
+ all=`expr $$all + 1`; \
+ case " $(XFAIL_TESTS) " in \
+ *" $$tst "*) \
+ xpass=`expr $$xpass + 1`; \
+ failed=`expr $$failed + 1`; \
+ echo "XPASS: $$tst"; \
+ ;; \
+ *) \
+ echo "PASS: $$tst"; \
+ ;; \
+ esac; \
+ elif test $$? -ne 77; then \
+ all=`expr $$all + 1`; \
+ case " $(XFAIL_TESTS) " in \
+ *" $$tst "*) \
+ xfail=`expr $$xfail + 1`; \
+ echo "XFAIL: $$tst"; \
+ ;; \
+ *) \
+ failed=`expr $$failed + 1`; \
+ echo "FAIL: $$tst"; \
+ ;; \
+ esac; \
+ else \
+ skip=`expr $$skip + 1`; \
+ echo "SKIP: $$tst"; \
+ fi; \
+ done; \
+ if test "$$failed" -eq 0; then \
+ if test "$$xfail" -eq 0; then \
+ banner="All $$all tests passed"; \
+ else \
+ banner="All $$all tests behaved as expected ($$xfail expected failures)"; \
+ fi; \
+ else \
+ if test "$$xpass" -eq 0; then \
+ banner="$$failed of $$all tests failed"; \
+ else \
+ banner="$$failed of $$all tests did not behave as expected ($$xpass unexpected passes)"; \
+ fi; \
+ fi; \
+ dashes="$$banner"; \
+ skipped=""; \
+ if test "$$skip" -ne 0; then \
+ skipped="($$skip tests were not run)"; \
+ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \
+ dashes="$$skipped"; \
+ fi; \
+ report=""; \
+ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \
+ report="Please report to $(PACKAGE_BUGREPORT)"; \
+ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \
+ dashes="$$report"; \
+ fi; \
+ dashes=`echo "$$dashes" | sed s/./=/g`; \
+ echo "$$dashes"; \
+ echo "$$banner"; \
+ test -z "$$skipped" || echo "$$skipped"; \
+ test -z "$$report" || echo "$$report"; \
+ echo "$$dashes"; \
+ test "$$failed" -eq 0; \
+ else :; fi
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
+ list='$(DISTFILES)'; for file in $$list; do \
+ case $$file in \
+ $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
+ $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
+ esac; \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test "$$dir" != "$$file" && test "$$dir" != "."; then \
+ dir="/$$dir"; \
+ $(mkdir_p) "$(distdir)$$dir"; \
+ else \
+ dir=''; \
+ fi; \
+ if test -d $$d/$$file; then \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+ fi; \
+ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+ else \
+ test -f $(distdir)/$$file \
+ || cp -p $$d/$$file $(distdir)/$$file \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+ $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS)
+ $(MAKE) $(AM_MAKEFLAGS) check-TESTS
+check: check-am
+all-am: Makefile $(LIBRARIES) $(PROGRAMS)
+installdirs:
+ for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bootdir)"; do \
+ test -z "$$dir" || $(mkdir_p) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-bootPROGRAMS clean-checkPROGRAMS clean-generic \
+ clean-libLIBRARIES mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am: install-bootPROGRAMS
+
+install-exec-am: install-libLIBRARIES
+
+install-info: install-info-am
+
+install-man:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-bootPROGRAMS uninstall-info-am \
+ uninstall-libLIBRARIES
+
+.PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \
+ clean-bootPROGRAMS clean-checkPROGRAMS clean-generic \
+ clean-libLIBRARIES ctags distclean distclean-compile \
+ distclean-generic distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-bootPROGRAMS \
+ install-data install-data-am install-exec install-exec-am \
+ install-info install-info-am install-libLIBRARIES install-man \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am uninstall-bootPROGRAMS \
+ uninstall-info-am uninstall-libLIBRARIES
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/viengoos/activity.c b/viengoos/activity.c
new file mode 100644
index 0000000..3432461
--- /dev/null
+++ b/viengoos/activity.c
@@ -0,0 +1,137 @@
+/* activity.c - Activity object implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <errno.h>
+#include <assert.h>
+#include <hurd/cap.h>
+
+#include "activity.h"
+#include "object.h"
+
+error_t
+activity_allocate (struct activity *parent,
+		   struct thread *caller,
+		   addr_t faddr, l4_word_t index,
+		   addr_t aaddr, addr_t caddr,
+		   l4_word_t priority, l4_word_t weight,
+		   l4_word_t storage_quota)
+{
+  /* INDEX selects the object within the folio; reject out-of-range
+     values.  (NOTE(review): if l4_word_t is unsigned, the 0 <= INDEX
+     half of the test is vacuous -- confirm.)  */
+  if (! (0 <= index && index < FOLIO_OBJECTS))
+    return EINVAL;
+
+  /* Resolve FADDR in CALLER's address space to the folio that will
+     provide the storage for the new activity.  */
+  struct cap folio_cap = object_lookup_rel (parent, &caller->aspace,
+					    faddr, cap_folio, NULL);
+  if (folio_cap.type == cap_void)
+    return ENOENT;
+  struct object *folio = cap_to_object (parent, &folio_cap);
+  if (! folio)
+    return ENOENT;
+
+  /* Locate the two capability slots in CALLER's address space that
+     will receive the activity and activity-control capabilities.
+     Both must exist before we allocate anything.  */
+  struct cap *acap = slot_lookup_rel (parent, &caller->aspace, aaddr,
+				      -1, NULL);
+  if (! acap)
+    return ENOENT;
+  struct cap *ccap = slot_lookup_rel (parent, &caller->aspace, caddr,
+				      -1, NULL);
+  if (! ccap)
+    return ENOENT;
+
+  /* Allocate the activity object out of the folio's INDEXth slot.  */
+  struct object *o;
+  folio_object_alloc (parent, (struct folio *) folio, index,
+		      cap_activity, &o);
+  struct activity *activity = (struct activity *) o;
+  /* Both slots initially designate the new object identically; the
+     control slot is then distinguished only by its capability type.  */
+  *ccap = *acap = object_to_cap (o);
+  ccap->type = cap_activity_control;
+
+  /* Record the parent-assigned scheduling and quota parameters.  */
+  activity->priority = priority;
+  activity->weight = weight;
+  activity->storage_quota = storage_quota;
+
+  return 0;
+}
+
+/* Destroy the activity TARGET: free all folios allocated to it,
+   recursively destroy its children, unlink it from its parent's
+   child list and free the underlying object.  ACTIVITY is the
+   activity relative to which object lookups are performed.  CAP, if
+   non-NULL, must be an activity-control capability (sanity check
+   only).  Panics if TARGET is the root activity (void parent).  */
+void
+activity_destroy (struct activity *activity,
+		  struct cap *cap, struct activity *target)
+{
+  /* XXX: If we implement storage reparenting, we need to be careful
+     to avoid a recursive loop as an activity's storage may be stored
+     in a folio allocated to itself.  */
+  assert (! cap || cap->type == cap_activity_control);
+
+  /* We should never destroy the root activity.  */
+  if (target->parent.type == cap_void)
+    panic ("Request to destroy root activity");
+
+  /* XXX: Rewrite this to avoid recursion!!!  */
+
+  /* Destroy all folios allocated to this activity.  */
+  while (target->folios.type != cap_void)
+    {
+      struct object *f = cap_to_object (activity, &target->folios);
+      /* If F was destroyed, it should have been removed from its
+	 respective activity's allocation list.  */
+      assert (f);
+      folio_free (activity, (struct folio *) f);
+    }
+
+  /* Activities that are sub-activities of TARGET are not
+     necessarily allocated out of storage allocated to TARGET.  */
+  while (target->children.type != cap_void)
+    {
+      /* Iterate over TARGET's child list -- the loop condition tests
+	 TARGET->CHILDREN, so we must look up the same capability
+	 (previously this read ACTIVITY->CHILDREN, which could loop
+	 forever or destroy the wrong subtree).  */
+      struct object *a = cap_to_object (activity, &target->children);
+      /* If A was destroyed, it should have been removed from its
+	 respective activity's allocation list.  */
+      assert (a);
+      activity_destroy (target, NULL, (struct activity *) a);
+    }
+
+  /* Unlink TARGET from its parent's child list.  */
+  struct activity *prev = NULL;
+  if (target->sibling_prev.type != cap_void)
+    prev = (struct activity *) cap_to_object (activity, &target->sibling_prev);
+
+  struct activity *next = NULL;
+  if (target->sibling_next.type != cap_void)
+    next = (struct activity *) cap_to_object (activity, &target->sibling_next);
+
+  struct activity *p
+    = (struct activity *) cap_to_object (activity, &target->parent);
+  assert (p);
+  struct object_desc *tdesc = object_to_object_desc ((struct object *) target);
+
+  if (prev)
+    prev->sibling_next = target->sibling_next;
+  else
+    {
+      /* TARGET is the head of P's child list: P->CHILDREN must
+	 designate TARGET itself (compare against TARGET's
+	 descriptor, not P's).  Advance the head past TARGET; if
+	 TARGET has no successor this stores a void capability,
+	 ensuring P->CHILDREN never dangles on the freed object.  */
+      assert (p->children.oid == tdesc->oid);
+      assert (p->children.version == tdesc->version);
+      p->children = target->sibling_next;
+    }
+
+  if (next)
+    next->sibling_prev = target->sibling_prev;
+
+  object_free (activity, (struct object *) target);
+}
diff --git a/viengoos/activity.h b/viengoos/activity.h
new file mode 100644
index 0000000..64b557a
--- /dev/null
+++ b/viengoos/activity.h
@@ -0,0 +1,91 @@
+/* activity.h - Activity object implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_ACTIVITY_H
+#define RM_ACTIVITY_H
+
+#include <l4.h>
+#include <errno.h>
+
+#include "cap.h"
+
+/* Forward. */
+struct object_desc;
+struct thread;
+
+/* An activity is the resource principal against which storage is
+   accounted.  The capability fields below thread activities into an
+   intrusive tree (PARENT / CHILDREN / SIBLING_NEXT / SIBLING_PREV)
+   and chain the folios backing their storage; activity_destroy walks
+   and maintains these lists.  */
+struct activity
+{
+  /* On-disk data.  */
+
+  /* Parent activity.  A void capability here identifies the root
+     activity (activity_destroy refuses to destroy it).  */
+  struct cap parent;
+
+  /* List of child activities (if any).  Threaded via
+     SIBLING_NEXT.  */
+  struct cap children;
+
+  /* This activity's siblings.  */
+  struct cap sibling_next;
+  struct cap sibling_prev;
+
+  /* Head of the linked list of folios allocated to this activity.  */
+  struct cap folios;
+
+  /* Parent assigned values.  */
+  /* Memory.  */
+  l4_word_t priority;
+  l4_word_t weight;
+  /* Maximum number of folios this activity may allocate.  0 means no
+     limit.  */
+  l4_word_t storage_quota;
+
+  /* Number of folios allocated to this activity (including
+     children).  */
+  l4_word_t folio_count;
+
+  /* The remainder of the elements are in-memory only.  */
+
+  /* Head of list of objects owned by this activity.  */
+  struct object_desc *objects;
+
+  /* Number of frames allocated to this activity (including
+     children).  */
+  int frames;
+};
+
+/* Allocate a new activity. Charge to activity PARENT, which is the
+ parent. FOLIO specifies the capability slot in CALLER's address
+ space that contains the folio to use to allocate the storage and
+ INDEX specifies which in the folio to use. ACTIVITY and CONTROL
+ specify where to store the capabilities designating the new
+ activity and the activity's control capability, respectively.
+ PRIORITY, WEIGHT and STORAGE_QUOTA are the initial priority and
+ weight of the activity. */
+extern error_t activity_allocate (struct activity *parent,
+ struct thread *caller,
+ addr_t folio, l4_word_t index,
+ addr_t activity, addr_t control,
+ l4_word_t priority, l4_word_t weight,
+ l4_word_t storage_quota);
+
+extern void activity_destroy (struct activity *activity,
+ struct cap *cap, struct activity *target);
+
+#endif
diff --git a/viengoos/as.c b/viengoos/as.c
new file mode 100644
index 0000000..6c87872
--- /dev/null
+++ b/viengoos/as.c
@@ -0,0 +1,550 @@
+/* as.c - Address space composition helper functions.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <l4.h>
+#include <stddef.h>
+#include <assert.h>
+
+#include <hurd/cap.h>
+#include <hurd/stddef.h>
+#include <hurd/folio.h>
+
+#include "as.h"
+#include "bits.h"
+
+#ifdef RM_INTERN
+#include "object.h"
+#endif
+
+/* Build the address space such that A designates a capability slot.
+ If MAY_OVERWRITE is true, may overwrite an existing capability.
+ Otherwise, the capability slot is expected to contain a void
+ capability. */
+static struct cap *
+as_build_internal (activity_t activity,
+ struct cap *root, addr_t a,
+ struct as_insert_rt (*allocate_object) (enum cap_type type,
+ addr_t addr),
+ bool may_overwrite)
+{
+ struct cap *start = root;
+
+ assert (! ADDR_IS_VOID (a));
+
+ l4_uint64_t addr = addr_prefix (a);
+ l4_word_t remaining = addr_depth (a);
+
+ debug (4, "Ensuring slot at 0x%llx/%d", addr, remaining);
+
+ /* The REMAINING bits to translates are in the REMAINING most significant
+ bits of ADDR. Here it is more convenient to have them in the
+ lower bits. */
+ addr >>= (ADDR_BITS - remaining);
+
+ struct cap fake_slot;
+
+ do
+ {
+ struct object *cappage = NULL;
+
+ l4_uint64_t root_guard = CAP_GUARD (root);
+ int root_gbits = CAP_GUARD_BITS (root);
+ if (root->type != cap_void
+ && remaining >= root_gbits
+ && root_guard == extract_bits64_inv (addr,
+ remaining - 1, root_gbits))
+ /* ROOT's (possibly zero-width) guard matches and thus
+ translates part of the address. */
+ {
+ /* Subtract the number of bits the guard translates. */
+ remaining -= root_gbits;
+ assert (remaining >= 0);
+
+ if (remaining == 0)
+ /* ROOT is not a void capability yet the guard translates
+ all of the bits. This means that ROOT references an
+ object at ADDR. This is a problem: we want to insert a
+ capability at ADDR. */
+ {
+ as_dump_from (activity, start, __func__);
+ panic ("There is already a %s object at %llx/%d!",
+ cap_type_string (root->type),
+ addr_prefix (a), addr_depth (a));
+ }
+
+ switch (root->type)
+ {
+ case cap_cappage:
+ case cap_rcappage:
+ /* Load the referenced object. */
+ cappage = cap_to_object (activity, root);
+ if (! cappage)
+ /* ROOT's type was not void but its designation was
+ invalid. This can only happen if we inserted an object
+ and subsequently destroyed it. */
+ {
+ /* The type should now have been set to cap_void. */
+ assert (root->type == cap_void);
+ as_dump_from (activity, root, __func__);
+ panic ("Lost object at %llx/%d",
+ addr_prefix (a), addr_depth (a) - remaining);
+ }
+
+ /* We index CAPPAGE below. */
+ break;
+
+ case cap_folio:
+ {
+ if (remaining < FOLIO_OBJECTS_LOG2)
+ panic ("Translating " ADDR_FMT "; not enough bits (%d) "
+ "to index folio at " ADDR_FMT,
+ ADDR_PRINTF (a), remaining,
+ ADDR_PRINTF (addr_chop (a, remaining)));
+
+ struct object *object = cap_to_object (activity, root);
+#ifdef RM_INTERN
+ if (! object)
+ {
+ debug (1, "Failed to get object with OID %llx",
+ root->oid);
+ return false;
+ }
+#else
+ assert (object);
+#endif
+
+ struct folio *folio = (struct folio *) object;
+
+ int i = extract_bits64_inv (addr,
+ remaining - 1, FOLIO_OBJECTS_LOG2);
+ if (folio->objects[i].type == cap_void)
+ panic ("Translating %llx/%d; indexed folio /%d object void",
+ addr_prefix (a), addr_depth (a),
+ ADDR_BITS - remaining);
+
+ root = &fake_slot;
+
+#ifdef RM_INTERN
+ struct object_desc *fdesc;
+ fdesc = object_to_object_desc (object);
+
+ object = object_find (activity, fdesc->oid + i + 1);
+ assert (object);
+ *root = object_to_cap (object);
+#else
+ /* We don't use cap_copy as we just need a byte
+ copy. */
+ *root = folio->objects[i];
+#endif
+
+ remaining -= FOLIO_OBJECTS_LOG2;
+ continue;
+ }
+
+ default:
+ as_dump_from (activity, start, __func__);
+ panic ("Can't insert object at %llx/%d: "
+ "%s at 0x%llx/%d does not translate address bits",
+ addr_prefix (a), addr_depth (a),
+ cap_type_string (root->type),
+ addr_prefix (a), addr_depth (a) - remaining);
+ }
+ }
+ else
+ /* We can get here due to two scenarios: ROOT is void or the
+ the addresses at which we want to insert the object does
+ not match the guard at ROOT. Perhaps in the former and
+ definitely in the latter, we need to introduce a level of
+ indirection.
+
+ R - ROOT
+ E - ENTRY
+ C - new cappage
+
+ | <-root_depth-> | mismatch -> | <- gbits
+ | | <- match C <- new page table
+ R R / \ <- common guard,
+ | | R \ index and
+ o o | \ remaining guard
+ / \ / | \ o E
+ o o o E o / \
+ ^ o o
+ just insert */
+ {
+ /* For convenience, we prefer that the remaining bits be
+ multiple of 8. This is useful as when we insert another
+ page that conflicts with the guard, we can trivially make
+ use of a full cappage rather than a subpage, moreover,
+ it ensures that as paths are decompressed, the tree
+ remains shallow.
+
+ Consider an AS with a single page, the root having a
+ 20-bit guard:
+
+ o
+ | <- 20 bit guard
+ o <- page
+
+ If we insert another page and there is a common guard of
+ 1-bit, we could reuse this bit:
+
+ o
+ | <--- 1 bit guard
+ o <--- 8-bit cappage
+ / \ <-- 11-bit guards
+ o o <- pages
+
+ The problem with this is when we insert a third page that
+ does not share the guard:
+
+ o
+ |
+ o <- 1-bit subpage
+ / \
+ o o <- 8-bit cappage
+ / \ | <- 11-bit guards
+ o o o
+
+ In this case, we would prefer a guard of 4 at the top.
+
+ Managing the tree would also become a pain when removing
+ entries. */
+
+ /* The number of bits until the next object. */
+ int tilobject;
+
+ /* GBITS is the amount of guard that we use to point to the
+ cappage we will allocate.
+
+ REMAINING - GBITS - log2 (sizeof (cappage)) is the guard
+ length of each entry in the new page. */
+ int gbits;
+ if (root->type == cap_void)
+ {
+ int space = l4_msb64 (extract_bits64 (addr, 0, remaining));
+ if (space <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ /* The slot is available and the remaining bits to
+ translate fit in the guard. */
+ break;
+
+ gbits = tilobject = remaining;
+ }
+ else
+ /* Find the size of the common prefix. */
+ {
+ l4_uint64_t a = root_guard;
+ int max = root_gbits > remaining ? remaining : root_gbits;
+ l4_uint64_t b = extract_bits64_inv (addr, remaining - 1, max);
+ if (remaining < root_gbits)
+ a >>= root_gbits - remaining;
+
+ gbits = max - l4_msb64 (a ^ b);
+
+ tilobject = root_gbits;
+ }
+
+ /* Make sure that the guard to use fits in the guard
+ area. */
+ int firstset = l4_msb64 (extract_bits64_inv (addr,
+ remaining - 1, gbits));
+ if (firstset > CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ /* FIRSTSET is the first (most significant) non-zero guard
+ bit. GBITS - FIRSTSET are the number of zero bits
+ before the most significant non-zero bit. We can
+ include all of the initial zero bits plus up to the
+ next CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. */
+ gbits = (gbits - firstset) + CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
+
+ /* We want to choose the guard length such that the cappage
+ that we insert occurs at a natural position so as to
+ avoid partial cappages or painful rearrangements of the
+ tree. Thus, we want the total remaining bits to
+ translate after the guard be equal to PAGESIZE_LOG2 + i *
+ CAPPAGE_SLOTS_LOG2 where i > 0. As GBITS is maximal, we
+ have to remove guard bits to achieve this. */
+ int untranslated_bits = remaining + ADDR_BITS - addr_depth (a);
+ if (untranslated_bits > PAGESIZE_LOG2)
+ /* There are more bits than a data page's worth of
+ translation bits. */
+ {
+ int to_remove = -1;
+ if (untranslated_bits - gbits >= PAGESIZE_LOG2)
+ {
+ to_remove = CAPPAGE_SLOTS_LOG2
+ - ((untranslated_bits - gbits - PAGESIZE_LOG2)
+ % CAPPAGE_SLOTS_LOG2);
+
+ if (to_remove <= gbits)
+ gbits -= to_remove;
+ else
+ gbits = 0;
+ }
+ else
+ /* Insert a cappage at /ADDR_BITS-PAGESIZE_LOG2. */
+ gbits = untranslated_bits - PAGESIZE_LOG2;
+ }
+
+ /* Account the bits translated by the guard. */
+ remaining -= gbits;
+
+ /* Log of the size of the required subpage (number of bits a
+ subpage translates). */
+ int subpage_bits = tilobject - gbits;
+ if (subpage_bits > CAPPAGE_SLOTS_LOG2)
+ /* A cappage translates maximally CAPPAGE_SLOTS_LOG2-bits. */
+ subpage_bits = CAPPAGE_SLOTS_LOG2;
+ assert (subpage_bits > 0);
+
+ /* Allocate new cappage and rearrange the tree. */
+ /* XXX: If we use a subpage, we just ignore the rest of the
+ page. This is a bit of a waste but makes the code
+ simpler. */
+ /* ALLOCATE_OBJECT wants the number of significant bits
+ translated to this object; REMAINING is number of bits
+ remaining to translate. */
+ addr_t cappage_addr = addr_chop (a, remaining);
+ struct as_insert_rt rt = allocate_object (cap_cappage, cappage_addr);
+ if (rt.cap.type == cap_void)
+ /* No memory. */
+ return NULL;
+
+ cappage = cap_to_object (activity, &rt.cap);
+
+ /* Indirect access to the object designated by ROOT via the
+ appropriate slot in new cappage (the pivot). */
+ int pivot_idx = extract_bits_inv (root_guard,
+ root_gbits - gbits - 1,
+ subpage_bits);
+
+ addr_t pivot_addr = addr_extend (rt.storage,
+ pivot_idx,
+ CAPPAGE_SLOTS_LOG2);
+ addr_t root_addr = addr_chop (cappage_addr, gbits);
+
+ struct cap_addr_trans addr_trans = root->addr_trans;
+ int d = tilobject - gbits - subpage_bits;
+ CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
+ extract_bits64 (root_guard, 0, d), d);
+
+ bool r = cap_copy_x (activity,
+ &cappage->caps[pivot_idx], pivot_addr,
+ *root, root_addr,
+ CAP_COPY_COPY_GUARD, addr_trans);
+ assert (r);
+
+ /* Finally, set the slot at ROOT to point to CAPPAGE. */
+ root_guard = extract_bits64_inv (root_guard,
+ root_gbits - 1, gbits);
+ r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
+ root_guard, gbits,
+ 0 /* We always use the
+ first subpage in
+ a page. */,
+ 1 << (CAPPAGE_SLOTS_LOG2
+ - subpage_bits));
+ assert (r);
+
+ r = cap_copy_x (activity, root, root_addr, rt.cap, rt.storage,
+ CAP_COPY_COPY_SUBPAGE | CAP_COPY_COPY_GUARD,
+ addr_trans);
+ assert (r);
+ }
+
+ /* Index CAPPAGE finding the next PTE. */
+
+ /* The cappage referenced by ROOT translates WIDTH bits. */
+ int width = CAP_SUBPAGE_SIZE_LOG2 (root);
+ /* That should not be more than we have left to translate. */
+ if (width > remaining)
+ {
+ as_dump_from (activity, start, __func__);
+ panic ("Can't index %d-bit cappage: not enough bits (%d)",
+ width, remaining);
+ }
+ int idx = extract_bits64_inv (addr, remaining - 1, width);
+ root = &cappage->caps[CAP_SUBPAGE_OFFSET (root) + idx];
+
+ remaining -= width;
+ }
+ while (remaining > 0);
+
+ if (! may_overwrite)
+ assert (root->type == cap_void);
+
+ int gbits = remaining;
+ l4_word_t guard = extract_bits64 (addr, 0, gbits);
+ if (gbits != CAP_GUARD_BITS (root) || guard != CAP_GUARD (root))
+ {
+ struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
+ bool r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
+ 0, 1);
+ assert (r);
+
+ r = cap_copy_x (activity, root, addr_chop (a, gbits),
+ *root, addr_chop (a, gbits),
+ CAP_COPY_COPY_GUARD, addr_trans);
+ assert (r);
+ }
+
+ return root;
+}
+
+/* Ensure that the slot designated by A is accessible. */
+struct cap *
+as_slot_ensure_full (activity_t activity,
+ struct cap *root, addr_t a,
+ struct as_insert_rt
+ (*allocate_object) (enum cap_type type, addr_t addr))
+{
+ return as_build_internal (activity, root, a, allocate_object, true);
+}
+
+void
+as_insert (activity_t activity,
+ struct cap *root, addr_t addr, struct cap entry, addr_t entry_addr,
+ struct as_insert_rt (*allocate_object) (enum cap_type type,
+ addr_t addr))
+{
+ struct cap *slot = as_build_internal (activity, root, addr, allocate_object,
+ false);
+ cap_copy (activity, slot, addr, entry, entry_addr);
+}
+
+static void
+print_nr (int width, l4_int64_t nr, bool hex)
+{
+ int base = 10;
+ if (hex)
+ base = 16;
+
+ l4_int64_t v = nr;
+ int w = 0;
+ if (v < 0)
+ {
+ v = -v;
+ w ++;
+ }
+ do
+ {
+ w ++;
+ v /= base;
+ }
+ while (v > 0);
+
+ int i;
+ for (i = w; i < width; i ++)
+ putchar (' ');
+
+ if (hex)
+ printf ("0x%llx", nr);
+ else
+ printf ("%lld", nr);
+}
+
+static void
+do_walk (activity_t activity, int index, struct cap *root, addr_t addr,
+ int indent, const char *output_prefix)
+{
+ int i;
+
+ struct cap cap = cap_lookup_rel (activity, root, addr, -1, NULL);
+ if (cap.type == cap_void)
+ return;
+
+ if (output_prefix)
+ printf ("%s: ", output_prefix);
+ for (i = 0; i < indent; i ++)
+ printf (".");
+
+ printf ("[ ");
+ if (index != -1)
+ print_nr (3, index, false);
+ else
+ printf ("root");
+ printf (" ] ");
+
+ print_nr (12, addr_prefix (addr), true);
+ printf ("/%d ", addr_depth (addr));
+ if (CAP_GUARD_BITS (&cap))
+ printf ("| 0x%llx/%d ", CAP_GUARD (&cap), CAP_GUARD_BITS (&cap));
+ if (CAP_SUBPAGES (&cap) != 1)
+ printf ("(%d/%d) ", CAP_SUBPAGE (&cap), CAP_SUBPAGES (&cap));
+
+ if (CAP_GUARD_BITS (&cap)
+ && ADDR_BITS - addr_depth (addr) >= CAP_GUARD_BITS (&cap))
+ printf ("=> 0x%llx/%d ",
+ addr_prefix (addr_extend (addr,
+ CAP_GUARD (&cap),
+ CAP_GUARD_BITS (&cap))),
+ addr_depth (addr) + CAP_GUARD_BITS (&cap));
+
+#ifdef RM_INTERN
+ printf ("@%llx ", cap.oid);
+#endif
+ printf ("%s", cap_type_string (cap.type));
+
+ if (! cap_to_object (activity, &cap))
+ {
+ printf (" <- LOST (likely deallocated)\n");
+ return;
+ }
+
+ printf ("\n");
+
+ if (addr_depth (addr) + CAP_GUARD_BITS (&cap) > ADDR_BITS)
+ return;
+
+ addr = addr_extend (addr, CAP_GUARD (&cap), CAP_GUARD_BITS (&cap));
+
+ switch (cap.type)
+ {
+ case cap_cappage:
+ case cap_rcappage:
+ if (addr_depth (addr) + CAP_SUBPAGE_SIZE_LOG2 (&cap) > ADDR_BITS)
+ return;
+
+ for (i = 0; i < CAP_SUBPAGE_SIZE (&cap); i ++)
+ do_walk (activity, i, root,
+ addr_extend (addr, i, CAP_SUBPAGE_SIZE_LOG2 (&cap)),
+ indent + 1, output_prefix);
+
+ return;
+
+ case cap_folio:
+ if (addr_depth (addr) + FOLIO_OBJECTS_LOG2 > ADDR_BITS)
+ return;
+
+ for (i = 0; i < FOLIO_OBJECTS; i ++)
+ do_walk (activity, i, root,
+ addr_extend (addr, i, FOLIO_OBJECTS_LOG2),
+ indent + 1, output_prefix);
+
+ return;
+
+ default:
+ return;
+ }
+}
+
+void
+as_dump_from (activity_t activity, struct cap *root, const char *prefix)
+{
+ do_walk (activity, -1, root, ADDR (0, 0), 0, prefix);
+}
diff --git a/viengoos/as.h b/viengoos/as.h
new file mode 100644
index 0000000..03e4222
--- /dev/null
+++ b/viengoos/as.h
@@ -0,0 +1,52 @@
+/* as.h - Address space composition helper functions interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_AS_H
+#define RM_AS_H
+
+#include <l4.h>
+#include <hurd/cap.h>
+
+struct as_insert_rt
+{
+ struct cap cap;
+ addr_t storage;
+};
+
+/* Callback used by the following function. When called must return a
+ cap designating an object of type TYPE. */
+typedef struct as_insert_rt allocate_object_callback_t (enum cap_type type,
+ addr_t addr);
+
+/* Insert the object described by entry ENTRY at address ADDR into the
+ address space rooted at ROOT. ALLOC_OBJECT is a callback to
+ allocate an object of type TYPE at address ADDR. The callback
+ should NOT insert the allocated object into the address space. */
+extern void as_insert (activity_t activity,
+ struct cap *root, addr_t addr,
+ struct cap entry, addr_t entry_addr,
+ allocate_object_callback_t alloc);
+
+/* If debugging is enabled dump the address space described by ROOT.
+ PREFIX is prefixed to each line of output. */
+extern void as_dump_from (activity_t activity,
+ struct cap *root, const char *prefix);
+
+#endif
diff --git a/viengoos/bits.h b/viengoos/bits.h
new file mode 100644
index 0000000..22823e3
--- /dev/null
+++ b/viengoos/bits.h
@@ -0,0 +1,87 @@
+/* bits.h - Bit manipulation functions.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_BITS_H
+#define RM_BITS_H
+
+#include <stdint.h>
+#include <assert.h>
+
+/* Return the C bits of word W starting at bit S. (The LSB is 0 and
+ the MSB is (sizeof (unsigned int) * 8).) */
+static inline unsigned int
+extract_bits (unsigned int w, int s, int c)
+{
+ assert (0 <= s && s < (sizeof (unsigned int) * 8));
+ assert (0 <= c && s + c <= (sizeof (unsigned int) * 8));
+
+ if (c == (sizeof (unsigned int) * 8))
+ /* 1U << (sizeof (unsigned int) * 8) is problematic: "If the value of
+ the right operand is negative or is greater than or equal to
+ the width of the promoted left operand, the behavior is
+ undefined." */
+ {
+ assert (s == 0);
+ return w;
+ }
+ else
+ return (w >> s) & ((1ULL << c) - 1);
+}
+
+static inline uint64_t
+extract_bits64 (uint64_t w, int s, int c)
+{
+ assert (0 <= s && s < (sizeof (uint64_t) * 8));
+ assert (0 <= c && s + c <= (sizeof (uint64_t) * 8));
+
+ if (c == (sizeof (uint64_t) * 8))
+ {
+ assert (s == 0);
+ return w;
+ }
+ else
+ return (w >> s) & ((1ULL << c) - 1);
+}
+
+/* Return the C bits of word W ending at bit E. (The LSB is 0 and the
+ MSB is (sizeof (unsigned int) * 8).) */
+static inline unsigned int
+extract_bits_inv (unsigned int w, int e, int c)
+{
+ /* We special case this check here to allow extract_bits_inv (w, 31,
+ 0). */
+ if (c == 0)
+ return 0;
+
+ return extract_bits (w, e - c + 1, c);
+}
+
+static inline uint64_t
+extract_bits64_inv (uint64_t w, int e, int c)
+{
+ /* We special case this check here to allow extract_bits64_inv (w, 63,
+ 0). */
+ if (c == 0)
+ return 0;
+
+ return extract_bits64 (w, e - c + 1, c);
+}
+
+#endif
diff --git a/viengoos/boot-modules.c b/viengoos/boot-modules.c
new file mode 100644
index 0000000..0f0598a
--- /dev/null
+++ b/viengoos/boot-modules.c
@@ -0,0 +1,24 @@
+/* boot-modules.c - Boot module implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "boot-modules.h"
+
+struct boot_module boot_modules[BOOT_MODULES_MAX];
+int boot_module_count;
diff --git a/viengoos/boot-modules.h b/viengoos/boot-modules.h
new file mode 100644
index 0000000..2de2793
--- /dev/null
+++ b/viengoos/boot-modules.h
@@ -0,0 +1,39 @@
+/* boot-modules.h - Boot module interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_BOOT_MODULES_H
+#define RM_BOOT_MODULES_H
+
+#include <l4.h>
+
+struct boot_module
+{
+ l4_word_t start;
+ l4_word_t end;
+ char *command_line;
+};
+
+#define BOOT_MODULES_MAX 32
+
+/* These must be filled in by the architecture specific code. */
+extern struct boot_module boot_modules[BOOT_MODULES_MAX];
+extern int boot_module_count;
+
+#endif
diff --git a/viengoos/cap-lookup.c b/viengoos/cap-lookup.c
new file mode 100644
index 0000000..d80e701
--- /dev/null
+++ b/viengoos/cap-lookup.c
@@ -0,0 +1,324 @@
+/* cap-lookup.c - Address space walker.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <hurd/cap.h>
+#include <hurd/folio.h>
+#include <hurd/stddef.h>
+#include <assert.h>
+
+#include "bits.h"
+
+#ifdef RM_INTERN
+#include "object.h"
+#endif
+
+union rt
+{
+ struct cap cap;
+ struct cap *capp;
+};
+
+enum lookup_mode
+ {
+ want_cap,
+ want_slot,
+ want_object
+ };
+
+static bool
+lookup (activity_t activity,
+ struct cap *root, addr_t address,
+ enum cap_type type, bool *writable,
+ enum lookup_mode mode, union rt *rt)
+{
+ struct cap *start = root;
+
+ l4_uint64_t addr = addr_prefix (address);
+ l4_word_t remaining = addr_depth (address);
+ /* The code below assumes that the REMAINING significant bits are in the
+ lower bits, not upper. */
+ addr >>= (ADDR_BITS - remaining);
+
+ struct cap fake_slot;
+
+ /* Assume the object is writable until proven otherwise. */
+ int w = true;
+
+ while (remaining > 0)
+ {
+ assert (CAP_TYPE_MIN <= root->type && root->type <= CAP_TYPE_MAX);
+
+ if (cap_is_a (root, cap_rcappage))
+ /* The page directory is read-only. Note the weakened access
+ appropriately. */
+ {
+ if (type != -1 && type != cap_rpage && type != cap_rcappage)
+ {
+ debug (1, "Read-only cappage at %llx/%d but %s requires "
+ "write access",
+ addr_prefix (addr_chop (address, remaining)),
+ addr_depth (address) - remaining,
+ cap_type_string (type));
+
+ /* Translating this capability does not provide write
+ access. The only objects that are useful without write
+ access are read-only pages and read-only capability
+ pages. If the user is not looking for one of those,
+ then bail. */
+ return false;
+ }
+
+ w = false;
+ }
+
+ if (CAP_GUARD_BITS (root))
+ /* Check that ADDR contains the guard. */
+ {
+ int gdepth = CAP_GUARD_BITS (root);
+
+ if (gdepth > remaining)
+ {
+ debug (1, "Translating %llx/%d; not enough bits (%d) to "
+ "translate %d-bit guard at /%d",
+ addr_prefix (address), addr_depth (address),
+ remaining, gdepth, ADDR_BITS - remaining);
+ return false;
+ }
+
+ int guard = extract_bits64_inv (addr, remaining - 1, gdepth);
+ if (CAP_GUARD (root) != guard)
+ {
+ debug (1, "Guard 0x%llx/%d does not match 0x%llx's "
+ "bits %d-%d => 0x%x",
+ CAP_GUARD (root), CAP_GUARD_BITS (root), addr,
+ remaining - gdepth, remaining - 1, guard);
+ return false;
+ }
+
+ remaining -= gdepth;
+ }
+
+ if (remaining == 0)
+ /* We've translated the guard bits and there are no bits left
+ to translate. We now designate the object and not the
+ slot, however, if we designate an object, we always return
+ the slot pointing to it. */
+ break;
+
+ switch (root->type)
+ {
+ case cap_cappage:
+ case cap_rcappage:
+ {
+ /* Index the page table. */
+ int bits = CAP_SUBPAGE_SIZE_LOG2 (root);
+ if (remaining < bits)
+ {
+ debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
+ "to index %d-bit cappage at " ADDR_FMT,
+ ADDR_PRINTF (address), remaining, bits,
+ ADDR_PRINTF (addr_chop (address, remaining)));
+ return false;
+ }
+
+ struct object *object = cap_to_object (activity, root);
+ if (! object)
+ {
+#ifdef RM_INTERN
+ debug (1, "Failed to get object with OID %llx",
+ root->oid);
+#endif
+ return false;
+ }
+
+ int offset = CAP_SUBPAGE_OFFSET (root)
+ + extract_bits64_inv (addr, remaining - 1, bits);
+ assert (0 <= offset && offset < CAPPAGE_SLOTS);
+ remaining -= bits;
+
+ root = &object->caps[offset];
+ break;
+ }
+
+ case cap_folio:
+ if (remaining < FOLIO_OBJECTS_LOG2)
+ {
+ debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
+ "to index folio at " ADDR_FMT,
+ ADDR_PRINTF (address), remaining,
+ ADDR_PRINTF (addr_chop (address, remaining)));
+ return false;
+ }
+
+ struct object *object = cap_to_object (activity, root);
+#ifdef RM_INTERN
+ if (! object)
+ {
+ debug (1, "Failed to get object with OID %llx",
+ root->oid);
+ return false;
+ }
+#else
+ assert (object);
+#endif
+
+ struct folio *folio = (struct folio *) object;
+
+ root = &fake_slot;
+
+ int i = extract_bits64_inv (addr, remaining - 1, FOLIO_OBJECTS_LOG2);
+ if (folio->objects[i].type == cap_void)
+ {
+ memset (root, 0, sizeof (*root));
+ root->type = cap_void;
+ }
+ else
+ {
+
+#ifdef RM_INTERN
+ struct object_desc *fdesc;
+ fdesc = object_to_object_desc (object);
+
+ object = object_find (activity, fdesc->oid + i + 1);
+ assert (object);
+ *root = object_to_cap (object);
+#else
+ root->addr_trans = CAP_ADDR_TRANS_VOID;
+ root->type = folio->objects[i].type;
+ cap_set_shadow (root, cap_get_shadow (&folio->objects[i]));
+#endif
+ }
+
+ remaining -= FOLIO_OBJECTS_LOG2;
+ break;
+
+ default:
+ /* We designate a non-address bit translating object but we
+ have no bits left to translate. */
+ debug (1, "Encountered a %s at %llx/%d, expected a cappage",
+ cap_type_string (root->type),
+ addr_prefix (addr_chop (address, remaining)),
+ addr_depth (address) - remaining);
+ return false;
+ }
+
+ if (remaining == 0)
+ /* We've indexed the object and have no bits remaining to
+ translate. */
+ {
+ if (CAP_GUARD_BITS (root) && mode == want_object)
+ /* The caller wants an object but we haven't translated
+ the slot's guard. */
+ {
+ debug (1, "Found slot at %llx/%d but referenced object "
+ "(%s) has an untranslated guard of %lld/%d!",
+ addr_prefix (address), addr_depth (address),
+ cap_type_string (root->type), CAP_GUARD (root),
+ CAP_GUARD_BITS (root));
+ return false;
+ }
+
+ break;
+ }
+ }
+ assert (remaining == 0);
+
+ if (! cap_is_a (root, type))
+ /* Types don't match. They may, however, be compatible. */
+ {
+ if (((cap_is_a (root, cap_rpage) || cap_is_a (root, cap_page))
+ && (type == cap_rpage || type == cap_page))
+ || ((cap_is_a (root, cap_rcappage) || cap_is_a (root, cap_cappage))
+ && (type == cap_rcappage || type == cap_cappage)))
+ /* Types are compatible. We just need to downgrade the
+ rights. */
+ w = false;
+ else if (type != -1)
+ /* Incompatible types. */
+ {
+ as_dump_from (activity, start, __func__);
+ debug (4, "Requested type %s but cap at 0x%llx/%d designates a %s",
+ cap_type_string (type),
+ addr_prefix (address), addr_depth (address),
+ cap_type_string (root->type));
+ return false;
+ }
+ }
+
+ if (cap_is_a (root, cap_rpage) || cap_is_a (root, cap_rcappage))
+ w = false;
+
+ if (writable)
+ *writable = w;
+
+ if (mode == want_slot)
+ {
+ if (root == &fake_slot)
+ {
+ debug (1, "%llx/%d resolves to a folio object but want a slot",
+ addr_prefix (address), addr_depth (address));
+ return false;
+ }
+ rt->capp = root;
+ return true;
+ }
+ else
+ {
+ rt->cap = *root;
+ return true;
+ }
+}
+
+struct cap
+cap_lookup_rel (activity_t activity,
+ struct cap *root, addr_t address,
+ enum cap_type type, bool *writable)
+{
+ union rt rt;
+
+ if (! lookup (activity, root, address, type, writable, want_cap, &rt))
+ return (struct cap) { .type = cap_void };
+ return rt.cap;
+}
+
+struct cap
+object_lookup_rel (activity_t activity,
+ struct cap *root, addr_t address,
+ enum cap_type type, bool *writable)
+{
+ union rt rt;
+
+ if (! lookup (activity, root, address, type, writable, want_object, &rt))
+ return (struct cap) { .type = cap_void };
+ return rt.cap;
+}
+
+struct cap *
+slot_lookup_rel (activity_t activity,
+ struct cap *root, addr_t address,
+ enum cap_type type, bool *writable)
+{
+ union rt rt;
+
+ if (! lookup (activity, root, address, type, writable, want_slot, &rt))
+ return NULL;
+ return rt.capp;
+}
+
diff --git a/viengoos/cap.c b/viengoos/cap.c
new file mode 100644
index 0000000..f1ea7bb
--- /dev/null
+++ b/viengoos/cap.c
@@ -0,0 +1,166 @@
+/* cap.c - Basic capability framework.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <assert.h>
+#include <hurd/stddef.h>
+
+#include "cap.h"
+#include "object.h"
+#include "activity.h"
+
+const int cap_type_num_slots[] = { [cap_void] = 0,
+ [cap_page] = 0,
+ [cap_rpage] = 0,
+ [cap_cappage] = CAPPAGE_SLOTS,
+ [cap_rcappage] = CAPPAGE_SLOTS,
+ [cap_folio] = 0,
+ [cap_activity] = 0,
+ [cap_activity_control] = 0,
+ [cap_thread] = THREAD_SLOTS };
+
+void
+cap_set_object (struct cap *cap,
+ struct object *object, enum cap_type type)
+{
+ *cap = object_to_cap (object);
+ assert (cap->type == type);
+}
+
+struct object *
+cap_to_object (struct activity *activity, struct cap *cap)
+{
+ if (cap->type == cap_void)
+ return NULL;
+
+ struct object *object = object_find (activity, cap->oid);
+ if (! object)
+ return NULL;
+
+ struct object_desc *desc = object_to_object_desc (object);
+ if (desc->version != cap->version)
+ {
+ /* Clear the capability to save the effort of looking up the
+ object in the future. */
+ cap->type = cap_void;
+ return NULL;
+ }
+
+ /* If the capability is valid, then the types must match. */
+ assert (cap->type == desc->type);
+
+ return object;
+}
+
+void
+cap_shootdown (struct activity *activity, struct cap *cap)
+{
+ /* XXX: A recursive function may not be the best idea here. We are
+ guaranteed, however, at most 63 nested calls. */
+ void doit (struct cap *cap, int remaining)
+ {
+ int i;
+ struct object *object;
+
+ remaining -= CAP_GUARD_BITS (cap);
+
+ switch (cap->type)
+ {
+ case cap_page:
+ case cap_rpage:
+ if (remaining < PAGESIZE_LOG2)
+ return;
+
+ /* If the object is not in memory, then it can't be
+ mapped. */
+ object = object_find_soft (activity, cap->oid);
+ if (! object)
+ return;
+
+ struct object_desc *desc = object_to_object_desc (object);
+ if (desc->version != cap->version)
+ {
+ /* Clear the capability to save the effort of looking up the
+ object in the future. */
+ cap->type = cap_void;
+ return;
+ }
+
+#ifndef _L4_TEST_ENVIRONMENT
+ l4_fpage_t fpage = l4_fpage ((l4_word_t) object, PAGESIZE);
+ fpage = l4_fpage_add_rights (fpage, L4_FPAGE_FULLY_ACCESSIBLE);
+ l4_unmap_fpage (fpage);
+#endif
+ return;
+
+ case cap_cappage:
+ case cap_rcappage:
+ if (remaining < CAP_SUBPAGE_SIZE_LOG2 (cap) + PAGESIZE_LOG2)
+ return;
+
+ object = cap_to_object (activity, cap);
+ if (! object)
+ return;
+
+ remaining -= CAP_SUBPAGE_SIZE_LOG2 (cap);
+
+ for (i = 0; i < CAP_SUBPAGE_SIZE (cap); i ++)
+ doit (&object->caps[CAP_SUBPAGE_OFFSET (cap) + i], remaining);
+
+ return;
+
+ case cap_folio:
+ if (remaining < FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
+ return;
+
+ object = cap_to_object (activity, cap);
+ if (! object)
+ return;
+
+ struct folio *folio = (struct folio *) object;
+ struct object_desc *fdesc = object_to_object_desc (object);
+ oid_t foid = fdesc->oid;
+
+ remaining -= FOLIO_OBJECTS_LOG2;
+
+ for (i = 0; i < FOLIO_OBJECTS; i ++)
+ if (folio->objects[i].type == cap_page
+ || folio->objects[i].type == cap_rpage
+ || folio->objects[i].type == cap_cappage
+ || folio->objects[i].type == cap_rcappage)
+ {
+ struct cap cap;
+
+ cap.version = folio->objects[i].version;
+ cap.type = folio->objects[i].type;
+ cap.addr_trans = CAP_ADDR_TRANS_VOID;
+ cap.oid = foid + 1 + i;
+
+ doit (&cap, remaining);
+ }
+
+ return;
+
+ default:
+ return;
+ }
+ }
+
+ doit (cap, ADDR_BITS);
+}
diff --git a/viengoos/cap.h b/viengoos/cap.h
new file mode 100644
index 0000000..d6ecfb5
--- /dev/null
+++ b/viengoos/cap.h
@@ -0,0 +1,44 @@
+/* cap.h - Basic capability framework interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_CAP_H
+#define RM_CAP_H
+
+#include <l4.h>
+#include <hurd/cap.h>
+
+#include "rm.h"
+
+/* The number of slots in a capability object of the given type. */
+extern const int cap_type_num_slots[];
+
+/* Set's the capability TARGET to point to the same object as the
+ capability SOURCE, however, preserves the guard in TARGET. */
+static inline bool
+cap_set (struct cap *target, struct cap source)
+{
+ /* This is kosher as we know the implementation of CAP_COPY. */
+ return cap_copy (0, target, ADDR_VOID, source, ADDR_VOID);
+}
+
+/* Invalidate all mappings that may depend on this object. */
+extern void cap_shootdown (struct activity *activity, struct cap *cap);
+
+#endif
diff --git a/viengoos/config.m4 b/viengoos/config.m4
new file mode 100644
index 0000000..0635cc5
--- /dev/null
+++ b/viengoos/config.m4
@@ -0,0 +1,21 @@
+# config.m4 - Configure snippet for rm.
+# Copyright (C) 2003 Free Software Foundation, Inc.
+# Written by Maurizio Boriani.
+#
+# This file is part of the GNU Hurd.
+#
+# The GNU Hurd is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2, or (at
+# your option) any later version.
+#
+# The GNU Hurd is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+
+HURD_LOAD_ADDRESS(rm, 0x400000)
diff --git a/viengoos/debug.c b/viengoos/debug.c
new file mode 100644
index 0000000..21eb508
--- /dev/null
+++ b/viengoos/debug.c
@@ -0,0 +1,23 @@
+/* debug.c - Debugging implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "debug.h"
+
+int output_debug;
diff --git a/viengoos/debug.h b/viengoos/debug.h
new file mode 100644
index 0000000..db2edcb
--- /dev/null
+++ b/viengoos/debug.h
@@ -0,0 +1,35 @@
+/* debug.h - Debugging interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_DEBUG_H
+#define RM_DEBUG_H
+
+#include "output.h"
+
+#include <hurd/stddef.h>
+
+/* Invoke kernel debugger. */
+#ifdef _L4_TEST_ENVIRONMENT
+#define debugger()
+#else
+#define debugger() asm ("int $3");
+#endif
+
+#endif
diff --git a/viengoos/elf.h b/viengoos/elf.h
new file mode 100644
index 0000000..f5e6400
--- /dev/null
+++ b/viengoos/elf.h
@@ -0,0 +1,2438 @@
+/* This file defines standard ELF types, structures, and macros.
+ Copyright (C) 1995-1999,2000,2001,2002,2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _ELF_H
+#define _ELF_H 1
+
+/* Standard ELF types. */
+
+#include <stdint.h>
+
+/* Type for a 16-bit quantity. */
+typedef uint16_t Elf32_Half;
+typedef uint16_t Elf64_Half;
+
+/* Types for signed and unsigned 32-bit quantities. */
+typedef uint32_t Elf32_Word;
+typedef int32_t Elf32_Sword;
+typedef uint32_t Elf64_Word;
+typedef int32_t Elf64_Sword;
+
+/* Types for signed and unsigned 64-bit quantities. */
+typedef uint64_t Elf32_Xword;
+typedef int64_t Elf32_Sxword;
+typedef uint64_t Elf64_Xword;
+typedef int64_t Elf64_Sxword;
+
+/* Type of addresses. */
+typedef uint32_t Elf32_Addr;
+typedef uint64_t Elf64_Addr;
+
+/* Type of file offsets. */
+typedef uint32_t Elf32_Off;
+typedef uint64_t Elf64_Off;
+
+/* Type for section indices, which are 16-bit quantities. */
+typedef uint16_t Elf32_Section;
+typedef uint16_t Elf64_Section;
+
+/* Type for version symbol information. */
+typedef Elf32_Half Elf32_Versym;
+typedef Elf64_Half Elf64_Versym;
+
+
+/* The ELF file header. This appears at the start of every ELF file. */
+
+#define EI_NIDENT (16)
+
+typedef struct
+{
+ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
+ Elf32_Half e_type; /* Object file type */
+ Elf32_Half e_machine; /* Architecture */
+ Elf32_Word e_version; /* Object file version */
+ Elf32_Addr e_entry; /* Entry point virtual address */
+ Elf32_Off e_phoff; /* Program header table file offset */
+ Elf32_Off e_shoff; /* Section header table file offset */
+ Elf32_Word e_flags; /* Processor-specific flags */
+ Elf32_Half e_ehsize; /* ELF header size in bytes */
+ Elf32_Half e_phentsize; /* Program header table entry size */
+ Elf32_Half e_phnum; /* Program header table entry count */
+ Elf32_Half e_shentsize; /* Section header table entry size */
+ Elf32_Half e_shnum; /* Section header table entry count */
+ Elf32_Half e_shstrndx; /* Section header string table index */
+} Elf32_Ehdr;
+
+typedef struct
+{
+ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
+ Elf64_Half e_type; /* Object file type */
+ Elf64_Half e_machine; /* Architecture */
+ Elf64_Word e_version; /* Object file version */
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Word e_flags; /* Processor-specific flags */
+ Elf64_Half e_ehsize; /* ELF header size in bytes */
+ Elf64_Half e_phentsize; /* Program header table entry size */
+ Elf64_Half e_phnum; /* Program header table entry count */
+ Elf64_Half e_shentsize; /* Section header table entry size */
+ Elf64_Half e_shnum; /* Section header table entry count */
+ Elf64_Half e_shstrndx; /* Section header string table index */
+} Elf64_Ehdr;
+
+/* Fields in the e_ident array. The EI_* macros are indices into the
+ array. The macros under each EI_* macro are the values the byte
+ may have. */
+
+#define EI_MAG0 0 /* File identification byte 0 index */
+#define ELFMAG0 0x7f /* Magic number byte 0 */
+
+#define EI_MAG1 1 /* File identification byte 1 index */
+#define ELFMAG1 'E' /* Magic number byte 1 */
+
+#define EI_MAG2 2 /* File identification byte 2 index */
+#define ELFMAG2 'L' /* Magic number byte 2 */
+
+#define EI_MAG3 3 /* File identification byte 3 index */
+#define ELFMAG3 'F' /* Magic number byte 3 */
+
+/* Conglomeration of the identification bytes, for easy testing as a word. */
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
+
+#define EI_CLASS 4 /* File class byte index */
+#define ELFCLASSNONE 0 /* Invalid class */
+#define ELFCLASS32 1 /* 32-bit objects */
+#define ELFCLASS64 2 /* 64-bit objects */
+#define ELFCLASSNUM 3
+
+#define EI_DATA 5 /* Data encoding byte index */
+#define ELFDATANONE 0 /* Invalid data encoding */
+#define ELFDATA2LSB 1 /* 2's complement, little endian */
+#define ELFDATA2MSB 2 /* 2's complement, big endian */
+#define ELFDATANUM 3
+
+#define EI_VERSION 6 /* File version byte index */
+ /* Value must be EV_CURRENT */
+
+#define EI_OSABI 7 /* OS ABI identification */
+#define ELFOSABI_NONE 0 /* UNIX System V ABI */
+#define ELFOSABI_SYSV 0 /* Alias. */
+#define ELFOSABI_HPUX 1 /* HP-UX */
+#define ELFOSABI_NETBSD 2 /* NetBSD. */
+#define ELFOSABI_LINUX 3 /* Linux. */
+#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */
+#define ELFOSABI_AIX 7 /* IBM AIX. */
+#define ELFOSABI_IRIX 8 /* SGI Irix. */
+#define ELFOSABI_FREEBSD 9 /* FreeBSD. */
+#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */
+#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
+#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
+#define ELFOSABI_ARM 97 /* ARM */
+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
+
+#define EI_ABIVERSION 8 /* ABI version */
+
+#define EI_PAD 9 /* Byte index of padding bytes */
+
+/* Legal values for e_type (object file type). */
+
+#define ET_NONE 0 /* No file type */
+#define ET_REL 1 /* Relocatable file */
+#define ET_EXEC 2 /* Executable file */
+#define ET_DYN 3 /* Shared object file */
+#define ET_CORE 4 /* Core file */
+#define ET_NUM 5 /* Number of defined types */
+#define ET_LOOS 0xfe00 /* OS-specific range start */
+#define ET_HIOS 0xfeff /* OS-specific range end */
+#define ET_LOPROC 0xff00 /* Processor-specific range start */
+#define ET_HIPROC 0xffff /* Processor-specific range end */
+
+/* Legal values for e_machine (architecture). */
+
+#define EM_NONE 0 /* No machine */
+#define EM_M32 1 /* AT&T WE 32100 */
+#define EM_SPARC 2 /* SUN SPARC */
+#define EM_386 3 /* Intel 80386 */
+#define EM_68K 4 /* Motorola m68k family */
+#define EM_88K 5 /* Motorola m88k family */
+#define EM_860 7 /* Intel 80860 */
+#define EM_MIPS 8 /* MIPS R3000 big-endian */
+#define EM_S370 9 /* IBM System/370 */
+#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */
+
+#define EM_PARISC 15 /* HPPA */
+#define EM_VPP500 17 /* Fujitsu VPP500 */
+#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
+#define EM_960 19 /* Intel 80960 */
+#define EM_PPC 20 /* PowerPC */
+#define EM_PPC64 21 /* PowerPC 64-bit */
+#define EM_S390 22 /* IBM S390 */
+
+#define EM_V800 36 /* NEC V800 series */
+#define EM_FR20 37 /* Fujitsu FR20 */
+#define EM_RH32 38 /* TRW RH-32 */
+#define EM_RCE 39 /* Motorola RCE */
+#define EM_ARM 40 /* ARM */
+#define EM_FAKE_ALPHA 41 /* Digital Alpha */
+#define EM_SH 42 /* Hitachi SH */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+#define EM_TRICORE 44 /* Siemens Tricore */
+#define EM_ARC 45 /* Argonaut RISC Core */
+#define EM_H8_300 46 /* Hitachi H8/300 */
+#define EM_H8_300H 47 /* Hitachi H8/300H */
+#define EM_H8S 48 /* Hitachi H8S */
+#define EM_H8_500 49 /* Hitachi H8/500 */
+#define EM_IA_64 50 /* Intel Merced */
+#define EM_MIPS_X 51 /* Stanford MIPS-X */
+#define EM_COLDFIRE 52 /* Motorola Coldfire */
+#define EM_68HC12 53 /* Motorola M68HC12 */
+#define EM_MMA 54 /* Fujitsu MMA Multimedia Accelerator*/
+#define EM_PCP 55 /* Siemens PCP */
+#define EM_NCPU 56 /* Sony nCPU embedded RISC */
+#define EM_NDR1 57 /* Denso NDR1 microprocessor */
+#define EM_STARCORE 58 /* Motorola Start*Core processor */
+#define EM_ME16 59 /* Toyota ME16 processor */
+#define EM_ST100 60 /* STMicroelectronic ST100 processor */
+#define EM_TINYJ 61 /* Advanced Logic Corp. Tinyj emb.fam*/
+#define EM_X86_64 62 /* AMD x86-64 architecture */
+#define EM_PDSP 63 /* Sony DSP Processor */
+
+#define EM_FX66 66 /* Siemens FX66 microcontroller */
+#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16 mc */
+#define EM_ST7 68 /* STmicroelectronics ST7 8 bit mc */
+#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller */
+#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller */
+#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller */
+#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller */
+#define EM_SVX 73 /* Silicon Graphics SVx */
+#define EM_ST19 74 /* STMicroelectronics ST19 8 bit mc */
+#define EM_VAX 75 /* Digital VAX */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
+#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded processor */
+#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor */
+#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor */
+#define EM_MMIX 80 /* Donald Knuth's educational 64-bit processor */
+#define EM_HUANY 81 /* Harvard University machine-independent object files */
+#define EM_PRISM 82 /* SiTera Prism */
+#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller */
+#define EM_FR30 84 /* Fujitsu FR30 */
+#define EM_D10V 85 /* Mitsubishi D10V */
+#define EM_D30V 86 /* Mitsubishi D30V */
+#define EM_V850 87 /* NEC v850 */
+#define EM_M32R 88 /* Mitsubishi M32R */
+#define EM_MN10300 89 /* Matsushita MN10300 */
+#define EM_MN10200 90 /* Matsushita MN10200 */
+#define EM_PJ 91 /* picoJava */
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
+#define EM_ARC_A5 93 /* ARC Cores Tangent-A5 */
+#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */
+#define EM_NUM 95
+
+/* If it is necessary to assign new unofficial EM_* values, please
+ pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the
+ chances of collision with official or non-GNU unofficial values. */
+
+#define EM_ALPHA 0x9026
+
+/* Legal values for e_version (version). */
+
+#define EV_NONE 0 /* Invalid ELF version */
+#define EV_CURRENT 1 /* Current version */
+#define EV_NUM 2
+
+/* Section header. */
+
+typedef struct
+{
+ Elf32_Word sh_name; /* Section name (string tbl index) */
+ Elf32_Word sh_type; /* Section type */
+ Elf32_Word sh_flags; /* Section flags */
+ Elf32_Addr sh_addr; /* Section virtual addr at execution */
+ Elf32_Off sh_offset; /* Section file offset */
+ Elf32_Word sh_size; /* Section size in bytes */
+ Elf32_Word sh_link; /* Link to another section */
+ Elf32_Word sh_info; /* Additional section information */
+ Elf32_Word sh_addralign; /* Section alignment */
+ Elf32_Word sh_entsize; /* Entry size if section holds table */
+} Elf32_Shdr;
+
+typedef struct
+{
+ Elf64_Word sh_name; /* Section name (string tbl index) */
+ Elf64_Word sh_type; /* Section type */
+ Elf64_Xword sh_flags; /* Section flags */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Section size in bytes */
+ Elf64_Word sh_link; /* Link to another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
+} Elf64_Shdr;
+
+/* Special section indices. */
+
+#define SHN_UNDEF 0 /* Undefined section */
+#define SHN_LORESERVE 0xff00 /* Start of reserved indices */
+#define SHN_LOPROC 0xff00 /* Start of processor-specific */
+#define SHN_HIPROC 0xff1f /* End of processor-specific */
+#define SHN_LOOS 0xff20 /* Start of OS-specific */
+#define SHN_HIOS 0xff3f /* End of OS-specific */
+#define SHN_ABS 0xfff1 /* Associated symbol is absolute */
+#define SHN_COMMON 0xfff2 /* Associated symbol is common */
+#define SHN_XINDEX 0xffff /* Index is in extra table. */
+#define SHN_HIRESERVE 0xffff /* End of reserved indices */
+
+/* Legal values for sh_type (section type). */
+
+#define SHT_NULL 0 /* Section header table entry unused */
+#define SHT_PROGBITS 1 /* Program data */
+#define SHT_SYMTAB 2 /* Symbol table */
+#define SHT_STRTAB 3 /* String table */
+#define SHT_RELA 4 /* Relocation entries with addends */
+#define SHT_HASH 5 /* Symbol hash table */
+#define SHT_DYNAMIC 6 /* Dynamic linking information */
+#define SHT_NOTE 7 /* Notes */
+#define SHT_NOBITS 8 /* Program space with no data (bss) */
+#define SHT_REL 9 /* Relocation entries, no addends */
+#define SHT_SHLIB 10 /* Reserved */
+#define SHT_DYNSYM 11 /* Dynamic linker symbol table */
+#define SHT_INIT_ARRAY 14 /* Array of constructors */
+#define SHT_FINI_ARRAY 15 /* Array of destructors */
+#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */
+#define SHT_GROUP 17 /* Section group */
+#define SHT_SYMTAB_SHNDX 18 /* Extended section indices */
+#define SHT_NUM 19 /* Number of defined types. */
+#define SHT_LOOS 0x60000000 /* Start OS-specific */
+#define SHT_GNU_LIBLIST 0x6ffffff7 /* Prelink library list */
+#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */
+#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */
+#define SHT_SUNW_move 0x6ffffffa
+#define SHT_SUNW_COMDAT 0x6ffffffb
+#define SHT_SUNW_syminfo 0x6ffffffc
+#define SHT_GNU_verdef 0x6ffffffd /* Version definition section. */
+#define SHT_GNU_verneed 0x6ffffffe /* Version needs section. */
+#define SHT_GNU_versym 0x6fffffff /* Version symbol table. */
+#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */
+#define SHT_HIOS 0x6fffffff /* End OS-specific type */
+#define SHT_LOPROC 0x70000000 /* Start of processor-specific */
+#define SHT_HIPROC 0x7fffffff /* End of processor-specific */
+#define SHT_LOUSER 0x80000000 /* Start of application-specific */
+#define SHT_HIUSER 0x8fffffff /* End of application-specific */
+
+/* Legal values for sh_flags (section flags). */
+
+#define SHF_WRITE (1 << 0) /* Writable */
+#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */
+#define SHF_EXECINSTR (1 << 2) /* Executable */
+#define SHF_MERGE (1 << 4) /* Might be merged */
+#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */
+#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */
+#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */
+#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling
+ required */
+#define SHF_GROUP (1 << 9) /* Section is member of a group. */
+#define SHF_TLS (1 << 10) /* Section hold thread-local data. */
+#define SHF_MASKOS 0x0ff00000 /* OS-specific. */
+#define SHF_MASKPROC 0xf0000000 /* Processor-specific */
+
+/* Section group handling. */
+#define GRP_COMDAT 0x1 /* Mark group as COMDAT. */
+
+/* Symbol table entry. */
+
+typedef struct
+{
+ Elf32_Word st_name; /* Symbol name (string tbl index) */
+ Elf32_Addr st_value; /* Symbol value */
+ Elf32_Word st_size; /* Symbol size */
+ unsigned char st_info; /* Symbol type and binding */
+ unsigned char st_other; /* Symbol visibility */
+ Elf32_Section st_shndx; /* Section index */
+} Elf32_Sym;
+
+typedef struct
+{
+ Elf64_Word st_name; /* Symbol name (string tbl index) */
+ unsigned char st_info; /* Symbol type and binding */
+ unsigned char st_other; /* Symbol visibility */
+ Elf64_Section st_shndx; /* Section index */
+ Elf64_Addr st_value; /* Symbol value */
+ Elf64_Xword st_size; /* Symbol size */
+} Elf64_Sym;
+
+/* The syminfo section if available contains additional information about
+ every dynamic symbol. */
+
+typedef struct
+{
+ Elf32_Half si_boundto; /* Direct bindings, symbol bound to */
+ Elf32_Half si_flags; /* Per symbol flags */
+} Elf32_Syminfo;
+
+typedef struct
+{
+ Elf64_Half si_boundto; /* Direct bindings, symbol bound to */
+ Elf64_Half si_flags; /* Per symbol flags */
+} Elf64_Syminfo;
+
+/* Possible values for si_boundto. */
+#define SYMINFO_BT_SELF 0xffff /* Symbol bound to self */
+#define SYMINFO_BT_PARENT 0xfffe /* Symbol bound to parent */
+#define SYMINFO_BT_LOWRESERVE 0xff00 /* Beginning of reserved entries */
+
+/* Possible bitmasks for si_flags. */
+#define SYMINFO_FLG_DIRECT 0x0001 /* Direct bound symbol */
+#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-thru symbol for translator */
+#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */
+#define SYMINFO_FLG_LAZYLOAD 0x0008 /* Symbol bound to object to be lazy
+ loaded */
+/* Syminfo version values. */
+#define SYMINFO_NONE 0
+#define SYMINFO_CURRENT 1
+#define SYMINFO_NUM 2
+
+
+/* How to extract and insert information held in the st_info field. */
+
+#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4)
+#define ELF32_ST_TYPE(val) ((val) & 0xf)
+#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field. */
+#define ELF64_ST_BIND(val) ELF32_ST_BIND (val)
+#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val)
+#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type))
+
+/* Legal values for ST_BIND subfield of st_info (symbol binding). */
+
+#define STB_LOCAL 0 /* Local symbol */
+#define STB_GLOBAL 1 /* Global symbol */
+#define STB_WEAK 2 /* Weak symbol */
+#define STB_NUM 3 /* Number of defined types. */
+#define STB_LOOS 10 /* Start of OS-specific */
+#define STB_HIOS 12 /* End of OS-specific */
+#define STB_LOPROC 13 /* Start of processor-specific */
+#define STB_HIPROC 15 /* End of processor-specific */
+
+/* Legal values for ST_TYPE subfield of st_info (symbol type). */
+
+#define STT_NOTYPE 0 /* Symbol type is unspecified */
+#define STT_OBJECT 1 /* Symbol is a data object */
+#define STT_FUNC 2 /* Symbol is a code object */
+#define STT_SECTION 3 /* Symbol associated with a section */
+#define STT_FILE 4 /* Symbol's name is file name */
+#define STT_COMMON 5 /* Symbol is a common data object */
+#define STT_TLS 6 /* Symbol is thread-local data object*/
+#define STT_NUM 7 /* Number of defined types. */
+#define STT_LOOS 10 /* Start of OS-specific */
+#define STT_HIOS 12 /* End of OS-specific */
+#define STT_LOPROC 13 /* Start of processor-specific */
+#define STT_HIPROC 15 /* End of processor-specific */
+
+
+/* Symbol table indices are found in the hash buckets and chain table
+ of a symbol hash table section. This special index value indicates
+ the end of a chain, meaning no further symbols are found in that bucket. */
+
+#define STN_UNDEF 0 /* End of a chain. */
+
+
+/* How to extract and insert information held in the st_other field. */
+
+#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
+
+/* For ELF64 the definitions are the same. */
+#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
+
+/* Symbol visibility specification encoded in the st_other field. */
+#define STV_DEFAULT 0 /* Default symbol visibility rules */
+#define STV_INTERNAL 1 /* Processor specific hidden class */
+#define STV_HIDDEN 2 /* Sym unavailable in other modules */
+#define STV_PROTECTED 3 /* Not preemptible, not exported */
+
+
+/* Relocation table entry without addend (in section of type SHT_REL). */
+
+typedef struct
+{
+ Elf32_Addr r_offset; /* Address */
+ Elf32_Word r_info; /* Relocation type and symbol index */
+} Elf32_Rel;
+
+/* I have seen two different definitions of the Elf64_Rel and
+ Elf64_Rela structures, so we'll leave them out until Novell (or
+ whoever) gets their act together. */
+/* The following, at least, is used on Sparc v9, MIPS, and Alpha. */
+
+typedef struct
+{
+ Elf64_Addr r_offset; /* Address */
+ Elf64_Xword r_info; /* Relocation type and symbol index */
+} Elf64_Rel;
+
+/* Relocation table entry with addend (in section of type SHT_RELA). */
+
+typedef struct
+{
+ Elf32_Addr r_offset; /* Address */
+ Elf32_Word r_info; /* Relocation type and symbol index */
+ Elf32_Sword r_addend; /* Addend */
+} Elf32_Rela;
+
+typedef struct
+{
+ Elf64_Addr r_offset; /* Address */
+ Elf64_Xword r_info; /* Relocation type and symbol index */
+ Elf64_Sxword r_addend; /* Addend */
+} Elf64_Rela;
+
+/* How to extract and insert information held in the r_info field. */
+
+#define ELF32_R_SYM(val) ((val) >> 8)
+#define ELF32_R_TYPE(val) ((val) & 0xff)
+#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff))
+
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type))
+
+/* Program segment header. */
+
+typedef struct
+{
+ Elf32_Word p_type; /* Segment type */
+ Elf32_Off p_offset; /* Segment file offset */
+ Elf32_Addr p_vaddr; /* Segment virtual address */
+ Elf32_Addr p_paddr; /* Segment physical address */
+ Elf32_Word p_filesz; /* Segment size in file */
+ Elf32_Word p_memsz; /* Segment size in memory */
+ Elf32_Word p_flags; /* Segment flags */
+ Elf32_Word p_align; /* Segment alignment */
+} Elf32_Phdr;
+
+typedef struct
+{
+ Elf64_Word p_type; /* Segment type */
+ Elf64_Word p_flags; /* Segment flags */
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment */
+} Elf64_Phdr;
+
+/* Legal values for p_type (segment type). */
+
+#define PT_NULL 0 /* Program header table entry unused */
+#define PT_LOAD 1 /* Loadable program segment */
+#define PT_DYNAMIC 2 /* Dynamic linking information */
+#define PT_INTERP 3 /* Program interpreter */
+#define PT_NOTE 4 /* Auxiliary information */
+#define PT_SHLIB 5 /* Reserved */
+#define PT_PHDR 6 /* Entry for header table itself */
+#define PT_TLS 7 /* Thread-local storage segment */
+#define PT_NUM 8 /* Number of defined types */
+#define PT_LOOS 0x60000000 /* Start of OS-specific */
+#define PT_GNU_EH_FRAME 0x6474e550 /* GCC .eh_frame_hdr segment */
+#define PT_LOSUNW 0x6ffffffa
+#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
+#define PT_SUNWSTACK 0x6ffffffb /* Stack segment */
+#define PT_HISUNW 0x6fffffff
+#define PT_HIOS 0x6fffffff /* End of OS-specific */
+#define PT_LOPROC 0x70000000 /* Start of processor-specific */
+#define PT_HIPROC 0x7fffffff /* End of processor-specific */
+
+/* Legal values for p_flags (segment flags). */
+
+#define PF_X (1 << 0) /* Segment is executable */
+#define PF_W (1 << 1) /* Segment is writable */
+#define PF_R (1 << 2) /* Segment is readable */
+#define PF_MASKOS 0x0ff00000 /* OS-specific */
+#define PF_MASKPROC 0xf0000000 /* Processor-specific */
+
+/* Legal values for note segment descriptor types for core files. */
+
+#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */
+#define NT_FPREGSET 2 /* Contains copy of fpregset struct */
+#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */
+#define NT_PRXREG 4 /* Contains copy of prxregset struct */
+#define NT_TASKSTRUCT 4 /* Contains copy of task structure */
+#define NT_PLATFORM 5 /* String from sysinfo(SI_PLATFORM) */
+#define NT_AUXV 6 /* Contains copy of auxv array */
+#define NT_GWINDOWS 7 /* Contains copy of gwindows struct */
+#define NT_ASRS 8 /* Contains copy of asrset struct */
+#define NT_PSTATUS 10 /* Contains copy of pstatus struct */
+#define NT_PSINFO 13 /* Contains copy of psinfo struct */
+#define NT_PRCRED 14 /* Contains copy of prcred struct */
+#define NT_UTSNAME 15 /* Contains copy of utsname struct */
+#define NT_LWPSTATUS 16 /* Contains copy of lwpstatus struct */
+#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */
+#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct*/
+
+/* Legal values for the note segment descriptor types for object files. */
+
+#define NT_VERSION 1 /* Contains a version string. */
+
+
+/* Dynamic section entry. */
+
+typedef struct
+{
+ Elf32_Sword d_tag; /* Dynamic entry type */
+ union
+ {
+ Elf32_Word d_val; /* Integer value */
+ Elf32_Addr d_ptr; /* Address value */
+ } d_un;
+} Elf32_Dyn;
+
+typedef struct
+{
+ Elf64_Sxword d_tag; /* Dynamic entry type */
+ union
+ {
+ Elf64_Xword d_val; /* Integer value */
+ Elf64_Addr d_ptr; /* Address value */
+ } d_un;
+} Elf64_Dyn;
+
+/* Legal values for d_tag (dynamic entry type). */
+
+#define DT_NULL 0 /* Marks end of dynamic section */
+#define DT_NEEDED 1 /* Name of needed library */
+#define DT_PLTRELSZ 2 /* Size in bytes of PLT relocs */
+#define DT_PLTGOT 3 /* Processor defined value */
+#define DT_HASH 4 /* Address of symbol hash table */
+#define DT_STRTAB 5 /* Address of string table */
+#define DT_SYMTAB 6 /* Address of symbol table */
+#define DT_RELA 7 /* Address of Rela relocs */
+#define DT_RELASZ 8 /* Total size of Rela relocs */
+#define DT_RELAENT 9 /* Size of one Rela reloc */
+#define DT_STRSZ 10 /* Size of string table */
+#define DT_SYMENT 11 /* Size of one symbol table entry */
+#define DT_INIT 12 /* Address of init function */
+#define DT_FINI 13 /* Address of termination function */
+#define DT_SONAME 14 /* Name of shared object */
+#define DT_RPATH 15 /* Library search path (deprecated) */
+#define DT_SYMBOLIC 16 /* Start symbol search here */
+#define DT_REL 17 /* Address of Rel relocs */
+#define DT_RELSZ 18 /* Total size of Rel relocs */
+#define DT_RELENT 19 /* Size of one Rel reloc */
+#define DT_PLTREL 20 /* Type of reloc in PLT */
+#define DT_DEBUG 21 /* For debugging; unspecified */
+#define DT_TEXTREL 22 /* Reloc might modify .text */
+#define DT_JMPREL 23 /* Address of PLT relocs */
+#define DT_BIND_NOW 24 /* Process relocations of object */
+#define DT_INIT_ARRAY 25 /* Array with addresses of init fct */
+#define DT_FINI_ARRAY 26 /* Array with addresses of fini fct */
+#define DT_INIT_ARRAYSZ 27 /* Size in bytes of DT_INIT_ARRAY */
+#define DT_FINI_ARRAYSZ 28 /* Size in bytes of DT_FINI_ARRAY */
+#define DT_RUNPATH 29 /* Library search path */
+#define DT_FLAGS 30 /* Flags for the object being loaded */
+#define DT_ENCODING 32 /* Start of encoded range */
+#define DT_PREINIT_ARRAY 32 /* Array with addresses of preinit fct*/
+#define DT_PREINIT_ARRAYSZ 33 /* size in bytes of DT_PREINIT_ARRAY */
+#define DT_NUM 34 /* Number used */
+#define DT_LOOS 0x6000000d /* Start of OS-specific */
+#define DT_HIOS 0x6ffff000 /* End of OS-specific */
+#define DT_LOPROC 0x70000000 /* Start of processor-specific */
+#define DT_HIPROC 0x7fffffff /* End of processor-specific */
+#define DT_PROCNUM DT_MIPS_NUM /* Most used by any processor */
+
+/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
+ Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
+ approach. */
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_GNU_PRELINKED 0x6ffffdf5 /* Prelinking timestamp */
+#define DT_GNU_CONFLICTSZ 0x6ffffdf6 /* Size of conflict section */
+#define DT_GNU_LIBLISTSZ 0x6ffffdf7 /* Size of library list */
+#define DT_CHECKSUM 0x6ffffdf8
+#define DT_PLTPADSZ 0x6ffffdf9
+#define DT_MOVEENT 0x6ffffdfa
+#define DT_MOVESZ 0x6ffffdfb
+#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */
+#define DT_POSFLAG_1 0x6ffffdfd /* Flags for DT_* entries, affecting
+ the following DT_* entry. */
+#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */
+#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */
+#define DT_VALRNGHI 0x6ffffdff
+#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag)) /* Reverse order! */
+#define DT_VALNUM 12
+
+/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
+ Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
+
+ If any adjustment is made to the ELF object after it has been
+ built these entries will need to be adjusted. */
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_GNU_CONFLICT 0x6ffffef8 /* Start of conflict section */
+#define DT_GNU_LIBLIST 0x6ffffef9 /* Library list */
+#define DT_CONFIG 0x6ffffefa /* Configuration information. */
+#define DT_DEPAUDIT 0x6ffffefb /* Dependency auditing. */
+#define DT_AUDIT 0x6ffffefc /* Object auditing. */
+#define DT_PLTPAD 0x6ffffefd /* PLT padding. */
+#define DT_MOVETAB 0x6ffffefe /* Move table. */
+#define DT_SYMINFO 0x6ffffeff /* Syminfo table. */
+#define DT_ADDRRNGHI 0x6ffffeff
+#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */
+#define DT_ADDRNUM 10
+
+/* The versioning entry types. The next are defined as part of the
+ GNU extension. */
+#define DT_VERSYM 0x6ffffff0
+
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+
+/* These were chosen by Sun. */
+#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */
+#define DT_VERDEF 0x6ffffffc /* Address of version definition
+ table */
+#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */
+#define DT_VERNEED 0x6ffffffe /* Address of table with needed
+ versions */
+#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */
+#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */
+#define DT_VERSIONTAGNUM 16
+
+/* Sun added these machine-independent extensions in the "processor-specific"
+ range. Be compatible. */
+#define DT_AUXILIARY 0x7ffffffd /* Shared object to load before self */
+#define DT_FILTER 0x7fffffff /* Shared object to get values from */
+#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1)
+#define DT_EXTRANUM 3
+
+/* Values of `d_un.d_val' in the DT_FLAGS entry. */
+#define DF_ORIGIN 0x00000001 /* Object may use DF_ORIGIN */
+#define DF_SYMBOLIC 0x00000002 /* Symbol resolutions starts here */
+#define DF_TEXTREL 0x00000004 /* Object contains text relocations */
+#define DF_BIND_NOW 0x00000008 /* No lazy binding for this object */
+#define DF_STATIC_TLS 0x00000010 /* Module uses the static TLS model */
+
+/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1
+ entry in the dynamic section. */
+#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */
+#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */
+#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */
+#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/
+#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/
+#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/
+#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */
+#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */
+#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */
+#define DF_1_TRANS 0x00000200
+#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */
+#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */
+#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */
+#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/
+#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */
+#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */
+#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */
+
+/* Flags for the feature selection in DT_FEATURE_1. */
+#define DTF_1_PARINIT 0x00000001
+#define DTF_1_CONFEXP 0x00000002
+
+/* Flags in the DT_POSFLAG_1 entry affecting only the next DT_* entry. */
+#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */
+#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not
+ generally available. */
+
+/* Version definition sections. */
+
+/* One version-definition entry (32-bit).  Entries form a chain linked
+   by vd_next; each heads an array of Elf32_Verdaux records located
+   vd_aux bytes from the start of this entry.  */
+typedef struct
+{
+ Elf32_Half vd_version; /* Version revision */
+ Elf32_Half vd_flags; /* Version information */
+ Elf32_Half vd_ndx; /* Version Index */
+ Elf32_Half vd_cnt; /* Number of associated aux entries */
+ Elf32_Word vd_hash; /* Version name hash value */
+ Elf32_Word vd_aux; /* Offset in bytes to verdaux array */
+ Elf32_Word vd_next; /* Offset in bytes to next verdef
+ entry */
+} Elf32_Verdef;
+
+/* One version-definition entry (64-bit).  Same layout semantics as
+   Elf32_Verdef: chained via vd_next, aux records at offset vd_aux.  */
+typedef struct
+{
+ Elf64_Half vd_version; /* Version revision */
+ Elf64_Half vd_flags; /* Version information */
+ Elf64_Half vd_ndx; /* Version Index */
+ Elf64_Half vd_cnt; /* Number of associated aux entries */
+ Elf64_Word vd_hash; /* Version name hash value */
+ Elf64_Word vd_aux; /* Offset in bytes to verdaux array */
+ Elf64_Word vd_next; /* Offset in bytes to next verdef
+ entry */
+} Elf64_Verdef;
+
+
+/* Legal values for vd_version (version revision). */
+#define VER_DEF_NONE 0 /* No version */
+#define VER_DEF_CURRENT 1 /* Current version */
+#define VER_DEF_NUM 2 /* Given version number */
+
+/* Legal values for vd_flags (version information flags). */
+#define VER_FLG_BASE 0x1 /* Version definition of file itself */
+#define VER_FLG_WEAK 0x2 /* Weak version identifier */
+
+/* Versym symbol index values. */
+#define VER_NDX_LOCAL 0 /* Symbol is local. */
+#define VER_NDX_GLOBAL 1 /* Symbol is global. */
+#define VER_NDX_LORESERVE 0xff00 /* Beginning of reserved entries. */
+#define VER_NDX_ELIMINATE 0xff01 /* Symbol is to be eliminated. */
+
+/* Auxiliary version information. */
+
+/* Auxiliary record of a verdef entry (32-bit): one version or
+   dependency name, chained via vda_next.  */
+typedef struct
+{
+ Elf32_Word vda_name; /* Version or dependency names */
+ Elf32_Word vda_next; /* Offset in bytes to next verdaux
+ entry */
+} Elf32_Verdaux;
+
+/* Auxiliary record of a verdef entry (64-bit); see Elf32_Verdaux.  */
+typedef struct
+{
+ Elf64_Word vda_name; /* Version or dependency names */
+ Elf64_Word vda_next; /* Offset in bytes to next verdaux
+ entry */
+} Elf64_Verdaux;
+
+
+/* Version dependency section. */
+
+/* One version-dependency entry (32-bit): names a file this object
+   needs versions from.  Chained via vn_next; its Elf32_Vernaux
+   records start vn_aux bytes from this entry.  */
+typedef struct
+{
+ Elf32_Half vn_version; /* Version of structure */
+ Elf32_Half vn_cnt; /* Number of associated aux entries */
+ Elf32_Word vn_file; /* Offset of filename for this
+ dependency */
+ Elf32_Word vn_aux; /* Offset in bytes to vernaux array */
+ Elf32_Word vn_next; /* Offset in bytes to next verneed
+ entry */
+} Elf32_Verneed;
+
+/* One version-dependency entry (64-bit); see Elf32_Verneed.  */
+typedef struct
+{
+ Elf64_Half vn_version; /* Version of structure */
+ Elf64_Half vn_cnt; /* Number of associated aux entries */
+ Elf64_Word vn_file; /* Offset of filename for this
+ dependency */
+ Elf64_Word vn_aux; /* Offset in bytes to vernaux array */
+ Elf64_Word vn_next; /* Offset in bytes to next verneed
+ entry */
+} Elf64_Verneed;
+
+
+/* Legal values for vn_version (version revision). */
+#define VER_NEED_NONE 0 /* No version */
+#define VER_NEED_CURRENT 1 /* Current version */
+#define VER_NEED_NUM 2 /* Given version number */
+
+/* Auxiliary needed version information. */
+
+/* Auxiliary record of a verneed entry (32-bit): one needed version
+   from the dependency file, chained via vna_next.  */
+typedef struct
+{
+ Elf32_Word vna_hash; /* Hash value of dependency name */
+ Elf32_Half vna_flags; /* Dependency specific information */
+ Elf32_Half vna_other; /* Unused */
+ Elf32_Word vna_name; /* Dependency name string offset */
+ Elf32_Word vna_next; /* Offset in bytes to next vernaux
+ entry */
+} Elf32_Vernaux;
+
+/* Auxiliary record of a verneed entry (64-bit); see Elf32_Vernaux.  */
+typedef struct
+{
+ Elf64_Word vna_hash; /* Hash value of dependency name */
+ Elf64_Half vna_flags; /* Dependency specific information */
+ Elf64_Half vna_other; /* Unused */
+ Elf64_Word vna_name; /* Dependency name string offset */
+ Elf64_Word vna_next; /* Offset in bytes to next vernaux
+ entry */
+} Elf64_Vernaux;
+
+
+/* Legal values for vna_flags. */
+#define VER_FLG_WEAK 0x2 /* Weak version identifier */
+
+
+/* Auxiliary vector. */
+
+/* This vector is normally only used by the program interpreter. The
+ usual definition in an ABI supplement uses the name auxv_t. The
+ vector is not usually defined in a standard <elf.h> file, but it
+ can't hurt. We rename it to avoid conflicts. The sizes of these
+ types are an arrangement between the exec server and the program
+ interpreter, so we don't fully specify them here. */
+
+/* Auxiliary-vector entry (32-bit): an AT_* tag plus a tag-dependent
+   value, interpreted via the a_un union.  */
+typedef struct
+{
+ int a_type; /* Entry type */
+ union
+ {
+ long int a_val; /* Integer value */
+ void *a_ptr; /* Pointer value */
+ void (*a_fcn) (void); /* Function pointer value */
+ } a_un;
+} Elf32_auxv_t;
+
+/* Auxiliary-vector entry (64-bit); see Elf32_auxv_t.  Only a_type's
+   width differs.  */
+typedef struct
+{
+ long int a_type; /* Entry type */
+ union
+ {
+ long int a_val; /* Integer value */
+ void *a_ptr; /* Pointer value */
+ void (*a_fcn) (void); /* Function pointer value */
+ } a_un;
+} Elf64_auxv_t;
+
+/* Legal values for a_type (entry type). */
+
+#define AT_NULL 0 /* End of vector */
+#define AT_IGNORE 1 /* Entry should be ignored */
+#define AT_EXECFD 2 /* File descriptor of program */
+#define AT_PHDR 3 /* Program headers for program */
+#define AT_PHENT 4 /* Size of program header entry */
+#define AT_PHNUM 5 /* Number of program headers */
+#define AT_PAGESZ 6 /* System page size */
+#define AT_BASE 7 /* Base address of interpreter */
+#define AT_FLAGS 8 /* Flags */
+#define AT_ENTRY 9 /* Entry point of program */
+#define AT_NOTELF 10 /* Program is not ELF */
+#define AT_UID 11 /* Real uid */
+#define AT_EUID 12 /* Effective uid */
+#define AT_GID 13 /* Real gid */
+#define AT_EGID 14 /* Effective gid */
+#define AT_CLKTCK 17 /* Frequency of times() */
+
+/* Some more special a_type values describing the hardware. */
+#define AT_PLATFORM 15 /* String identifying platform. */
+#define AT_HWCAP 16 /* Machine dependent hints about
+ processor capabilities. */
+
+/* This entry gives some information about the FPU initialization
+ performed by the kernel. */
+#define AT_FPUCW 18 /* Used FPU control word. */
+
+/* Cache block sizes. */
+#define AT_DCACHEBSIZE 19 /* Data cache block size. */
+#define AT_ICACHEBSIZE 20 /* Instruction cache block size. */
+#define AT_UCACHEBSIZE 21 /* Unified cache block size. */
+
+/* A special ignored value for PPC, used by the kernel to control the
+ interpretation of the AUXV. Must be > 16. */
+#define AT_IGNOREPPC 22 /* Entry should be ignored */
+
+/* Pointer to the global system page used for system calls and other
+ nice things. */
+#define AT_SYSINFO 32
+#define AT_SYSINFO_EHDR 33
+
+
+/* Note section contents. Each entry in the note section begins with
+ a header of a fixed form. */
+
+/* Note-section entry header (32-bit).  The name (n_namesz bytes) and
+   descriptor (n_descsz bytes) immediately follow this header.  */
+typedef struct
+{
+ Elf32_Word n_namesz; /* Length of the note's name. */
+ Elf32_Word n_descsz; /* Length of the note's descriptor. */
+ Elf32_Word n_type; /* Type of the note. */
+} Elf32_Nhdr;
+
+/* Note-section entry header (64-bit); same field meanings as
+   Elf32_Nhdr.  */
+typedef struct
+{
+ Elf64_Word n_namesz; /* Length of the note's name. */
+ Elf64_Word n_descsz; /* Length of the note's descriptor. */
+ Elf64_Word n_type; /* Type of the note. */
+} Elf64_Nhdr;
+
+/* Known names of notes. */
+
+/* Solaris entries in the note section have this name. */
+#define ELF_NOTE_SOLARIS "SUNW Solaris"
+
+/* Note entries for GNU systems have this name. */
+#define ELF_NOTE_GNU "GNU"
+
+
+/* Defined types of notes for Solaris. */
+
+/* Value of descriptor (one word) is desired pagesize for the binary. */
+#define ELF_NOTE_PAGESIZE_HINT 1
+
+
+/* Defined note types for GNU systems. */
+
+/* ABI information. The descriptor consists of words:
+ word 0: OS descriptor
+ word 1: major version of the ABI
+ word 2: minor version of the ABI
+ word 3: subminor version of the ABI
+*/
+#define ELF_NOTE_ABI 1
+
+/* Known OSes. These value can appear in word 0 of an ELF_NOTE_ABI
+ note section entry. */
+#define ELF_NOTE_OS_LINUX 0
+#define ELF_NOTE_OS_GNU 1
+#define ELF_NOTE_OS_SOLARIS2 2
+#define ELF_NOTE_OS_FREEBSD 3
+
+
+/* Move records. */
+/* Move record (32-bit).  m_info packs symbol index and size; use the
+   ELF32_M_* macros below to pack/unpack it.  */
+typedef struct
+{
+ Elf32_Xword m_value; /* Symbol value. */
+ Elf32_Word m_info; /* Size and index. */
+ Elf32_Word m_poffset; /* Symbol offset. */
+ Elf32_Half m_repeat; /* Repeat count. */
+ Elf32_Half m_stride; /* Stride info. */
+} Elf32_Move;
+
+/* Move record (64-bit); see Elf32_Move and the ELF64_M_* macros.  */
+typedef struct
+{
+ Elf64_Xword m_value; /* Symbol value. */
+ Elf64_Xword m_info; /* Size and index. */
+ Elf64_Xword m_poffset; /* Symbol offset. */
+ Elf64_Half m_repeat; /* Repeat count. */
+ Elf64_Half m_stride; /* Stride info. */
+} Elf64_Move;
+
+/* Macro to construct move records. */
+#define ELF32_M_SYM(info) ((info) >> 8)
+#define ELF32_M_SIZE(info) ((unsigned char) (info))
+#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size))
+
+#define ELF64_M_SYM(info) ELF32_M_SYM (info)
+#define ELF64_M_SIZE(info) ELF32_M_SIZE (info)
+#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size)
+
+
+/* Motorola 68k specific definitions. */
+
+/* Values for Elf32_Ehdr.e_flags. */
+#define EF_CPU32 0x00810000
+
+/* m68k relocs. */
+
+#define R_68K_NONE 0 /* No reloc */
+#define R_68K_32 1 /* Direct 32 bit */
+#define R_68K_16 2 /* Direct 16 bit */
+#define R_68K_8 3 /* Direct 8 bit */
+#define R_68K_PC32 4 /* PC relative 32 bit */
+#define R_68K_PC16 5 /* PC relative 16 bit */
+#define R_68K_PC8 6 /* PC relative 8 bit */
+#define R_68K_GOT32 7 /* 32 bit PC relative GOT entry */
+#define R_68K_GOT16 8 /* 16 bit PC relative GOT entry */
+#define R_68K_GOT8 9 /* 8 bit PC relative GOT entry */
+#define R_68K_GOT32O 10 /* 32 bit GOT offset */
+#define R_68K_GOT16O 11 /* 16 bit GOT offset */
+#define R_68K_GOT8O 12 /* 8 bit GOT offset */
+#define R_68K_PLT32 13 /* 32 bit PC relative PLT address */
+#define R_68K_PLT16 14 /* 16 bit PC relative PLT address */
+#define R_68K_PLT8 15 /* 8 bit PC relative PLT address */
+#define R_68K_PLT32O 16 /* 32 bit PLT offset */
+#define R_68K_PLT16O 17 /* 16 bit PLT offset */
+#define R_68K_PLT8O 18 /* 8 bit PLT offset */
+#define R_68K_COPY 19 /* Copy symbol at runtime */
+#define R_68K_GLOB_DAT 20 /* Create GOT entry */
+#define R_68K_JMP_SLOT 21 /* Create PLT entry */
+#define R_68K_RELATIVE 22 /* Adjust by program base */
+/* Keep this the last entry. */
+#define R_68K_NUM 23
+
+/* Intel 80386 specific definitions. */
+
+/* i386 relocs. */
+
+#define R_386_NONE 0 /* No reloc */
+#define R_386_32 1 /* Direct 32 bit */
+#define R_386_PC32 2 /* PC relative 32 bit */
+#define R_386_GOT32 3 /* 32 bit GOT entry */
+#define R_386_PLT32 4 /* 32 bit PLT address */
+#define R_386_COPY 5 /* Copy symbol at runtime */
+#define R_386_GLOB_DAT 6 /* Create GOT entry */
+#define R_386_JMP_SLOT 7 /* Create PLT entry */
+#define R_386_RELATIVE 8 /* Adjust by program base */
+#define R_386_GOTOFF 9 /* 32 bit offset to GOT */
+#define R_386_GOTPC 10 /* 32 bit PC relative offset to GOT */
+#define R_386_32PLT 11
+#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */
+#define R_386_TLS_IE 15 /* Address of GOT entry for static TLS
+ block offset */
+#define R_386_TLS_GOTIE 16 /* GOT entry for static TLS block
+ offset */
+#define R_386_TLS_LE 17 /* Offset relative to static TLS
+ block */
+#define R_386_TLS_GD 18 /* Direct 32 bit for GNU version of
+ general dynamic thread local data */
+#define R_386_TLS_LDM 19 /* Direct 32 bit for GNU version of
+ local dynamic thread local data
+ in LE code */
+#define R_386_16 20
+#define R_386_PC16 21
+#define R_386_8 22
+#define R_386_PC8 23
+#define R_386_TLS_GD_32 24 /* Direct 32 bit for general dynamic
+ thread local data */
+#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */
+#define R_386_TLS_GD_CALL 26 /* Relocation for call to
+ __tls_get_addr() */
+#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */
+#define R_386_TLS_LDM_32 28 /* Direct 32 bit for local dynamic
+ thread local data in LE code */
+#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */
+#define R_386_TLS_LDM_CALL 30 /* Relocation for call to
+ __tls_get_addr() in LDM code */
+#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */
+#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */
+#define R_386_TLS_IE_32 33 /* GOT entry for negated static TLS
+ block offset */
+#define R_386_TLS_LE_32 34 /* Negated offset relative to static
+ TLS block */
+#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */
+#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */
+#define R_386_TLS_TPOFF32 37 /* Negated offset in static TLS block */
+/* Keep this the last entry. */
+#define R_386_NUM 38
+
+/* SUN SPARC specific definitions. */
+
+/* Legal values for ST_TYPE subfield of st_info (symbol type). */
+
+#define STT_REGISTER 13 /* Global register reserved to app. */
+
+/* Values for Elf64_Ehdr.e_flags. */
+
+#define EF_SPARCV9_MM 3
+#define EF_SPARCV9_TSO 0
+#define EF_SPARCV9_PSO 1
+#define EF_SPARCV9_RMO 2
+#define EF_SPARC_LEDATA 0x800000 /* little endian data */
+#define EF_SPARC_EXT_MASK 0xFFFF00
+#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */
+#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */
+#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */
+#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */
+
+/* SPARC relocs. */
+
+#define R_SPARC_NONE 0 /* No reloc */
+#define R_SPARC_8 1 /* Direct 8 bit */
+#define R_SPARC_16 2 /* Direct 16 bit */
+#define R_SPARC_32 3 /* Direct 32 bit */
+#define R_SPARC_DISP8 4 /* PC relative 8 bit */
+#define R_SPARC_DISP16 5 /* PC relative 16 bit */
+#define R_SPARC_DISP32 6 /* PC relative 32 bit */
+#define R_SPARC_WDISP30 7 /* PC relative 30 bit shifted */
+#define R_SPARC_WDISP22 8 /* PC relative 22 bit shifted */
+#define R_SPARC_HI22 9 /* High 22 bit */
+#define R_SPARC_22 10 /* Direct 22 bit */
+#define R_SPARC_13 11 /* Direct 13 bit */
+#define R_SPARC_LO10 12 /* Truncated 10 bit */
+#define R_SPARC_GOT10 13 /* Truncated 10 bit GOT entry */
+#define R_SPARC_GOT13 14 /* 13 bit GOT entry */
+#define R_SPARC_GOT22 15 /* 22 bit GOT entry shifted */
+#define R_SPARC_PC10 16 /* PC relative 10 bit truncated */
+#define R_SPARC_PC22 17 /* PC relative 22 bit shifted */
+#define R_SPARC_WPLT30 18 /* 30 bit PC relative PLT address */
+#define R_SPARC_COPY 19 /* Copy symbol at runtime */
+#define R_SPARC_GLOB_DAT 20 /* Create GOT entry */
+#define R_SPARC_JMP_SLOT 21 /* Create PLT entry */
+#define R_SPARC_RELATIVE 22 /* Adjust by program base */
+#define R_SPARC_UA32 23 /* Direct 32 bit unaligned */
+
+/* Additional Sparc64 relocs. */
+
+#define R_SPARC_PLT32 24 /* Direct 32 bit ref to PLT entry */
+#define R_SPARC_HIPLT22 25 /* High 22 bit PLT entry */
+#define R_SPARC_LOPLT10 26 /* Truncated 10 bit PLT entry */
+#define R_SPARC_PCPLT32 27 /* PC rel 32 bit ref to PLT entry */
+#define R_SPARC_PCPLT22 28 /* PC rel high 22 bit PLT entry */
+#define R_SPARC_PCPLT10 29 /* PC rel trunc 10 bit PLT entry */
+#define R_SPARC_10 30 /* Direct 10 bit */
+#define R_SPARC_11 31 /* Direct 11 bit */
+#define R_SPARC_64 32 /* Direct 64 bit */
+#define R_SPARC_OLO10 33 /* 10bit with secondary 13bit addend */
+#define R_SPARC_HH22 34 /* Top 22 bits of direct 64 bit */
+#define R_SPARC_HM10 35 /* High middle 10 bits of ... */
+#define R_SPARC_LM22 36 /* Low middle 22 bits of ... */
+#define R_SPARC_PC_HH22 37 /* Top 22 bits of pc rel 64 bit */
+#define R_SPARC_PC_HM10 38 /* High middle 10 bit of ... */
+#define R_SPARC_PC_LM22 39 /* Low middle 22 bits of ... */
+#define R_SPARC_WDISP16 40 /* PC relative 16 bit shifted */
+#define R_SPARC_WDISP19 41 /* PC relative 19 bit shifted */
+#define R_SPARC_7 43 /* Direct 7 bit */
+#define R_SPARC_5 44 /* Direct 5 bit */
+#define R_SPARC_6 45 /* Direct 6 bit */
+#define R_SPARC_DISP64 46 /* PC relative 64 bit */
+#define R_SPARC_PLT64 47 /* Direct 64 bit ref to PLT entry */
+#define R_SPARC_HIX22 48 /* High 22 bit complemented */
+#define R_SPARC_LOX10 49 /* Truncated 11 bit complemented */
+#define R_SPARC_H44 50 /* Direct high 12 of 44 bit */
+#define R_SPARC_M44 51 /* Direct mid 22 of 44 bit */
+#define R_SPARC_L44 52 /* Direct low 10 of 44 bit */
+#define R_SPARC_REGISTER 53 /* Global register usage */
+#define R_SPARC_UA64 54 /* Direct 64 bit unaligned */
+#define R_SPARC_UA16 55 /* Direct 16 bit unaligned */
+#define R_SPARC_TLS_GD_HI22 56
+#define R_SPARC_TLS_GD_LO10 57
+#define R_SPARC_TLS_GD_ADD 58
+#define R_SPARC_TLS_GD_CALL 59
+#define R_SPARC_TLS_LDM_HI22 60
+#define R_SPARC_TLS_LDM_LO10 61
+#define R_SPARC_TLS_LDM_ADD 62
+#define R_SPARC_TLS_LDM_CALL 63
+#define R_SPARC_TLS_LDO_HIX22 64
+#define R_SPARC_TLS_LDO_LOX10 65
+#define R_SPARC_TLS_LDO_ADD 66
+#define R_SPARC_TLS_IE_HI22 67
+#define R_SPARC_TLS_IE_LO10 68
+#define R_SPARC_TLS_IE_LD 69
+#define R_SPARC_TLS_IE_LDX 70
+#define R_SPARC_TLS_IE_ADD 71
+#define R_SPARC_TLS_LE_HIX22 72
+#define R_SPARC_TLS_LE_LOX10 73
+#define R_SPARC_TLS_DTPMOD32 74
+#define R_SPARC_TLS_DTPMOD64 75
+#define R_SPARC_TLS_DTPOFF32 76
+#define R_SPARC_TLS_DTPOFF64 77
+#define R_SPARC_TLS_TPOFF32 78
+#define R_SPARC_TLS_TPOFF64 79
+/* Keep this the last entry. */
+#define R_SPARC_NUM 80
+
+/* For Sparc64, legal values for d_tag of Elf64_Dyn. */
+
+#define DT_SPARC_REGISTER 0x70000001
+#define DT_SPARC_NUM 2
+
+/* Bits present in AT_HWCAP, primarily for Sparc32. */
+
+#define HWCAP_SPARC_FLUSH 1 /* The cpu supports flush insn. */
+#define HWCAP_SPARC_STBAR 2
+#define HWCAP_SPARC_SWAP 4
+#define HWCAP_SPARC_MULDIV 8
+#define HWCAP_SPARC_V9 16 /* The cpu is v9, so v8plus is ok. */
+#define HWCAP_SPARC_ULTRA3 32
+
+/* MIPS R3000 specific definitions. */
+
+/* Legal values for e_flags field of Elf32_Ehdr. */
+
+#define EF_MIPS_NOREORDER 1 /* A .noreorder directive was used */
+#define EF_MIPS_PIC 2 /* Contains PIC code */
+#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */
+#define EF_MIPS_XGOT 8
+#define EF_MIPS_64BIT_WHIRL 16
+#define EF_MIPS_ABI2 32
+#define EF_MIPS_ABI_ON32 64
+#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */
+
+/* Legal values for MIPS architecture level. */
+
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */
+
+/* The following are non-official names and should not be used. */
+
+#define E_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define E_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define E_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define E_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define E_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define E_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */
+#define E_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */
+
+/* Special section indices. */
+
+#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */
+#define SHN_MIPS_TEXT 0xff01 /* Allocated text symbols. */
+#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */
+#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */
+#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */
+
+/* Legal values for sh_type field of Elf32_Shdr. */
+
+#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */
+#define SHT_MIPS_MSYM 0x70000001
+#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */
+#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */
+#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */
+#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information*/
+#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */
+#define SHT_MIPS_PACKAGE 0x70000007
+#define SHT_MIPS_PACKSYM 0x70000008
+#define SHT_MIPS_RELD 0x70000009
+#define SHT_MIPS_IFACE 0x7000000b
+#define SHT_MIPS_CONTENT 0x7000000c
+#define SHT_MIPS_OPTIONS 0x7000000d /* Miscellaneous options. */
+#define SHT_MIPS_SHDR 0x70000010
+#define SHT_MIPS_FDESC 0x70000011
+#define SHT_MIPS_EXTSYM 0x70000012
+#define SHT_MIPS_DENSE 0x70000013
+#define SHT_MIPS_PDESC 0x70000014
+#define SHT_MIPS_LOCSYM 0x70000015
+#define SHT_MIPS_AUXSYM 0x70000016
+#define SHT_MIPS_OPTSYM 0x70000017
+#define SHT_MIPS_LOCSTR 0x70000018
+#define SHT_MIPS_LINE 0x70000019
+#define SHT_MIPS_RFDESC 0x7000001a
+#define SHT_MIPS_DELTASYM 0x7000001b
+#define SHT_MIPS_DELTAINST 0x7000001c
+#define SHT_MIPS_DELTACLASS 0x7000001d
+#define SHT_MIPS_DWARF 0x7000001e /* DWARF debugging information. */
+#define SHT_MIPS_DELTADECL 0x7000001f
+#define SHT_MIPS_SYMBOL_LIB 0x70000020
+#define SHT_MIPS_EVENTS 0x70000021 /* Event section. */
+#define SHT_MIPS_TRANSLATE 0x70000022
+#define SHT_MIPS_PIXIE 0x70000023
+#define SHT_MIPS_XLATE 0x70000024
+#define SHT_MIPS_XLATE_DEBUG 0x70000025
+#define SHT_MIPS_WHIRL 0x70000026
+#define SHT_MIPS_EH_REGION 0x70000027
+#define SHT_MIPS_XLATE_OLD 0x70000028
+#define SHT_MIPS_PDR_EXCEPTION 0x70000029
+
+/* Legal values for sh_flags field of Elf32_Shdr. */
+
+#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */
+#define SHF_MIPS_MERGE 0x20000000
+#define SHF_MIPS_ADDR 0x40000000
+#define SHF_MIPS_STRINGS 0x80000000
+#define SHF_MIPS_NOSTRIP 0x08000000
+#define SHF_MIPS_LOCAL 0x04000000
+#define SHF_MIPS_NAMES 0x02000000
+#define SHF_MIPS_NODUPE 0x01000000
+
+
+/* Symbol tables. */
+
+/* MIPS specific values for `st_other'. */
+#define STO_MIPS_DEFAULT 0x0
+#define STO_MIPS_INTERNAL 0x1
+#define STO_MIPS_HIDDEN 0x2
+#define STO_MIPS_PROTECTED 0x3
+#define STO_MIPS_SC_ALIGN_UNUSED 0xff
+
+/* MIPS specific values for `st_info'. */
+#define STB_MIPS_SPLIT_COMMON 13
+
+/* Entries found in sections of type SHT_MIPS_GPTAB. */
+
+/* SHT_MIPS_GPTAB section entry.  The first entry in the section uses
+   gt_header; all subsequent entries use gt_entry.  */
+typedef union
+{
+ struct
+ {
+ Elf32_Word gt_current_g_value; /* -G value used for compilation */
+ Elf32_Word gt_unused; /* Not used */
+ } gt_header; /* First entry in section */
+ struct
+ {
+ Elf32_Word gt_g_value; /* If this value were used for -G */
+ Elf32_Word gt_bytes; /* This many bytes would be used */
+ } gt_entry; /* Subsequent entries in section */
+} Elf32_gptab;
+
+/* Entry found in sections of type SHT_MIPS_REGINFO. */
+
+/* SHT_MIPS_REGINFO section entry: register-usage masks and the $gp
+   value for the object.  */
+typedef struct
+{
+ Elf32_Word ri_gprmask; /* General registers used */
+ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */
+ Elf32_Sword ri_gp_value; /* $gp register value */
+} Elf32_RegInfo;
+
+/* Entries found in sections of type SHT_MIPS_OPTIONS. */
+
+/* Descriptor header in a SHT_MIPS_OPTIONS section.  `kind' is one of
+   the ODK_* values below and selects how the variable-length payload
+   following this header is interpreted.  */
+typedef struct
+{
+ unsigned char kind; /* Determines interpretation of the
+ variable part of descriptor. */
+ unsigned char size; /* Size of descriptor, including header. */
+ Elf32_Section section; /* Section header index of section affected,
+ 0 for global options. */
+ Elf32_Word info; /* Kind-specific information. */
+} Elf_Options;
+
+/* Values for `kind' field in Elf_Options. */
+
+#define ODK_NULL 0 /* Undefined. */
+#define ODK_REGINFO 1 /* Register usage information. */
+#define ODK_EXCEPTIONS 2 /* Exception processing options. */
+#define ODK_PAD 3 /* Section padding options. */
+#define ODK_HWPATCH 4 /* Hardware workarounds performed */
+#define ODK_FILL 5 /* record the fill value used by the linker. */
+#define ODK_TAGS 6 /* reserve space for desktop tools to write. */
+#define ODK_HWAND 7 /* HW workarounds. 'AND' bits when merging. */
+#define ODK_HWOR 8 /* HW workarounds. 'OR' bits when merging. */
+
+/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries. */
+
+#define OEX_FPU_MIN 0x1f /* FPE's which MUST be enabled. */
+#define OEX_FPU_MAX 0x1f00 /* FPE's which MAY be enabled. */
+#define OEX_PAGE0 0x10000 /* page zero must be mapped. */
+#define OEX_SMM 0x20000 /* Force sequential memory mode? */
+#define OEX_FPDBUG 0x40000 /* Force floating point debug mode? */
+#define OEX_PRECISEFP OEX_FPDBUG
+#define OEX_DISMISS 0x80000 /* Dismiss invalid address faults? */
+
+#define OEX_FPU_INVAL 0x10
+#define OEX_FPU_DIV0 0x08
+#define OEX_FPU_OFLO 0x04
+#define OEX_FPU_UFLO 0x02
+#define OEX_FPU_INEX 0x01
+
+/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry. */
+
+#define OHW_R4KEOP 0x1 /* R4000 end-of-page patch. */
+#define OHW_R8KPFETCH 0x2 /* may need R8000 prefetch patch. */
+#define OHW_R5KEOP 0x4 /* R5000 end-of-page patch. */
+#define OHW_R5KCVTL 0x8 /* R5000 cvt.[ds].l bug. clean=1. */
+
+#define OPAD_PREFIX 0x1
+#define OPAD_POSTFIX 0x2
+#define OPAD_SYMBOL 0x4
+
+/* Entry found in `.options' section. */
+
+/* Payload of an ODK_HWAND/ODK_HWOR entry in a `.options' section;
+   flag bits are the OHWA*_ values below.  */
+typedef struct
+{
+ Elf32_Word hwp_flags1; /* Extra flags. */
+ Elf32_Word hwp_flags2; /* Extra flags. */
+} Elf_Options_Hw;
+
+/* Masks for `info' in ElfOptions for ODK_HWAND and ODK_HWOR entries. */
+
+#define OHWA0_R4KEOP_CHECKED 0x00000001
+#define OHWA1_R4KEOP_CLEAN 0x00000002
+
+/* MIPS relocs. */
+
+#define R_MIPS_NONE 0 /* No reloc */
+#define R_MIPS_16 1 /* Direct 16 bit */
+#define R_MIPS_32 2 /* Direct 32 bit */
+#define R_MIPS_REL32 3 /* PC relative 32 bit */
+#define R_MIPS_26 4 /* Direct 26 bit shifted */
+#define R_MIPS_HI16 5 /* High 16 bit */
+#define R_MIPS_LO16 6 /* Low 16 bit */
+#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
+#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
+#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
+#define R_MIPS_PC16 10 /* PC relative 16 bit */
+#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
+#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
+
+#define R_MIPS_SHIFT5 16
+#define R_MIPS_SHIFT6 17
+#define R_MIPS_64 18
+#define R_MIPS_GOT_DISP 19
+#define R_MIPS_GOT_PAGE 20
+#define R_MIPS_GOT_OFST 21
+#define R_MIPS_GOT_HI16 22
+#define R_MIPS_GOT_LO16 23
+#define R_MIPS_SUB 24
+#define R_MIPS_INSERT_A 25
+#define R_MIPS_INSERT_B 26
+#define R_MIPS_DELETE 27
+#define R_MIPS_HIGHER 28
+#define R_MIPS_HIGHEST 29
+#define R_MIPS_CALL_HI16 30
+#define R_MIPS_CALL_LO16 31
+#define R_MIPS_SCN_DISP 32
+#define R_MIPS_REL16 33
+#define R_MIPS_ADD_IMMEDIATE 34
+#define R_MIPS_PJUMP 35
+#define R_MIPS_RELGOT 36
+#define R_MIPS_JALR 37
+/* Keep this the last entry. */
+#define R_MIPS_NUM 38
+
+/* Legal values for p_type field of Elf32_Phdr. */
+
+#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
+#define PT_MIPS_RTPROC 0x70000001 /* Runtime procedure table. */
+#define PT_MIPS_OPTIONS 0x70000002
+
+/* Special program header types. */
+
+#define PF_MIPS_LOCAL 0x10000000
+
+/* Legal values for d_tag field of Elf32_Dyn. */
+
+#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime linker interface version */
+#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */
+#define DT_MIPS_ICHECKSUM 0x70000003 /* Checksum */
+#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */
+#define DT_MIPS_FLAGS 0x70000005 /* Flags */
+#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Base address */
+#define DT_MIPS_MSYM 0x70000007
+#define DT_MIPS_CONFLICT 0x70000008 /* Address of CONFLICT section */
+#define DT_MIPS_LIBLIST 0x70000009 /* Address of LIBLIST section */
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local GOT entries */
+#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of CONFLICT entries */
+#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of LIBLIST entries */
+#define DT_MIPS_SYMTABNO 0x70000011 /* Number of DYNSYM entries */
+#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */
+#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in DYNSYM */
+#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */
+#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */
+#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */
+#define DT_MIPS_DELTA_CLASS_NO 0x70000018 /* Number of entries in
+ DT_MIPS_DELTA_CLASS. */
+#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */
+#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in
+ DT_MIPS_DELTA_INSTANCE. */
+#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */
+#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in
+ DT_MIPS_DELTA_RELOC. */
+#define DT_MIPS_DELTA_SYM 0x7000001d /* Delta symbols that Delta
+ relocations refer to. */
+#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in
+ DT_MIPS_DELTA_SYM. */
+#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the
+ class declaration. */
+#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in
+ DT_MIPS_DELTA_CLASSSYM. */
+#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */
+#define DT_MIPS_PIXIE_INIT 0x70000023
+#define DT_MIPS_SYMBOL_LIB 0x70000024
+#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025
+#define DT_MIPS_LOCAL_GOTIDX 0x70000026
+#define DT_MIPS_HIDDEN_GOTIDX 0x70000027
+#define DT_MIPS_PROTECTED_GOTIDX 0x70000028
+#define DT_MIPS_OPTIONS 0x70000029 /* Address of .options. */
+#define DT_MIPS_INTERFACE 0x7000002a /* Address of .interface. */
+#define DT_MIPS_DYNSTR_ALIGN 0x7000002b
+#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */
+#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_resolve
+ function stored in GOT. */
+#define DT_MIPS_PERF_SUFFIX 0x7000002e /* Default suffix of dso to be added
+ by rld on dlopen() calls. */
+#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */
+#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */
+#define DT_MIPS_AUX_DYNAMIC 0x70000031 /* Address of aux .dynamic. */
+#define DT_MIPS_NUM 0x32
+
+/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry. */
+
+#define RHF_NONE 0 /* No flags */
+#define RHF_QUICKSTART (1 << 0) /* Use quickstart */
+#define RHF_NOTPOT (1 << 1) /* Hash size not power of 2 */
+#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2) /* Ignore LD_LIBRARY_PATH */
+#define RHF_NO_MOVE (1 << 3)
+#define RHF_SGI_ONLY (1 << 4)
+#define RHF_GUARANTEE_INIT (1 << 5)
+#define RHF_DELTA_C_PLUS_PLUS (1 << 6)
+#define RHF_GUARANTEE_START_INIT (1 << 7)
+#define RHF_PIXIE (1 << 8)
+#define RHF_DEFAULT_DELAY_LOAD (1 << 9)
+#define RHF_REQUICKSTART (1 << 10)
+#define RHF_REQUICKSTARTED (1 << 11)
+#define RHF_CORD (1 << 12)
+#define RHF_NO_UNRES_UNDEF (1 << 13)
+#define RHF_RLD_ORDER_SAFE (1 << 14)
+
+/* Entries found in sections of type SHT_MIPS_LIBLIST. */
+
+/* SHT_MIPS_LIBLIST section entry (32-bit): identity and flags of one
+   shared object used at link time.  l_flags bits are the LL_* values
+   below.  */
+typedef struct
+{
+ Elf32_Word l_name; /* Name (string table index) */
+ Elf32_Word l_time_stamp; /* Timestamp */
+ Elf32_Word l_checksum; /* Checksum */
+ Elf32_Word l_version; /* Interface version */
+ Elf32_Word l_flags; /* Flags */
+} Elf32_Lib;
+
+/* Liblist entry (64-bit); see Elf32_Lib.  */
+typedef struct
+{
+ Elf64_Word l_name; /* Name (string table index) */
+ Elf64_Word l_time_stamp; /* Timestamp */
+ Elf64_Word l_checksum; /* Checksum */
+ Elf64_Word l_version; /* Interface version */
+ Elf64_Word l_flags; /* Flags */
+} Elf64_Lib;
+
+
+/* Legal values for l_flags. */
+
+#define LL_NONE 0
+#define LL_EXACT_MATCH (1 << 0) /* Require exact match */
+#define LL_IGNORE_INT_VER (1 << 1) /* Ignore interface version */
+#define LL_REQUIRE_MINOR (1 << 2)
+#define LL_EXPORTS (1 << 3)
+#define LL_DELAY_LOAD (1 << 4)
+#define LL_DELTA (1 << 5)
+
+/* Entries found in sections of type SHT_MIPS_CONFLICT. */
+
+typedef Elf32_Addr Elf32_Conflict;
+
+
+/* HPPA specific definitions. */
+
+/* Legal values for e_flags field of Elf32_Ehdr. */
+
+#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
+#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
+#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
+ prediction. */
+#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
+#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
+
+/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
+
+#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
+#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
+#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
+
+/* Additional section indices. */
+
+#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
+ symbols in ANSI C. */
+#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
+
+/* Legal values for sh_type field of Elf32_Shdr. */
+
+#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
+#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
+
+/* Legal values for sh_flags field of Elf32_Shdr. */
+
+#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
+#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
+
+/* Legal values for ST_TYPE subfield of st_info (symbol type). */
+
+#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
+
+#define STT_HP_OPAQUE (STT_LOOS + 0x1)
+#define STT_HP_STUB (STT_LOOS + 0x2)
+
+/* HPPA relocs. */
+
+#define R_PARISC_NONE 0 /* No reloc. */
+#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
+#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
+#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
+#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
+#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
+#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
+#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
+#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
+#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
+#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
+#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
+#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
+#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
+#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
+#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
+#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
+#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
+#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
+#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
+#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
+#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64 64 /* 64 bits function address. */
+#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
+#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
+#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
+#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
+#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
+#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
+#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
+#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
+#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
+#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
+#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
+#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
+#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
+#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
+#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
+#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
+#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
+#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
+#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LORESERVE 128
+#define R_PARISC_COPY 128 /* Copy relocation. */
+#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
+#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
+#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
+#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
+#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
+#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_HIRESERVE 255
+
+/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
+
+#define PT_HP_TLS (PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
+#define PT_HP_PARALLEL (PT_LOOS + 0x10)
+#define PT_HP_FASTBIND (PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
+#define PT_HP_STACK (PT_LOOS + 0x14)
+
+#define PT_PARISC_ARCHEXT 0x70000000
+#define PT_PARISC_UNWIND 0x70000001
+
+/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
+
+#define PF_PARISC_SBP 0x08000000
+
+#define PF_HP_PAGE_SIZE 0x00100000
+#define PF_HP_FAR_SHARED 0x00200000
+#define PF_HP_NEAR_SHARED 0x00400000
+#define PF_HP_CODE 0x01000000
+#define PF_HP_MODIFY 0x02000000
+#define PF_HP_LAZYSWAP 0x04000000
+#define PF_HP_SBP 0x08000000
+
+
+/* Alpha specific definitions. */
+
+/* Legal values for e_flags field of Elf64_Ehdr. */
+
+#define EF_ALPHA_32BIT 1 /* All addresses must be < 2GB. */
+#define EF_ALPHA_CANRELAX 2 /* Relocations for relaxing exist. */
+
+/* Legal values for sh_type field of Elf64_Shdr. */
+
+/* These two are primarily concerned with ECOFF debugging info. */
+#define SHT_ALPHA_DEBUG 0x70000001
+#define SHT_ALPHA_REGINFO 0x70000002
+
+/* Legal values for sh_flags field of Elf64_Shdr. */
+
+#define SHF_ALPHA_GPREL 0x10000000
+
+/* Legal values for st_other field of Elf64_Sym. */
+#define STO_ALPHA_NOPV 0x80 /* No PV required. */
+#define STO_ALPHA_STD_GPLOAD 0x88 /* PV only used for initial ldgp. */
+
+/* Alpha relocs. */
+
+#define R_ALPHA_NONE 0 /* No reloc */
+#define R_ALPHA_REFLONG 1 /* Direct 32 bit */
+#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */
+#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */
+#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */
+#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */
+#define R_ALPHA_GPDISP 6 /* Add displacement to GP */
+#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */
+#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */
+#define R_ALPHA_SREL16 9 /* PC relative 16 bit */
+#define R_ALPHA_SREL32 10 /* PC relative 32 bit */
+#define R_ALPHA_SREL64 11 /* PC relative 64 bit */
+#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */
+#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */
+#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */
+#define R_ALPHA_COPY 24 /* Copy symbol at runtime */
+#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
+#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
+#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
+#define R_ALPHA_TLS_GD_HI 28
+#define R_ALPHA_TLSGD 29
+#define R_ALPHA_TLS_LDM 30
+#define R_ALPHA_DTPMOD64 31
+#define R_ALPHA_GOTDTPREL 32
+#define R_ALPHA_DTPREL64 33
+#define R_ALPHA_DTPRELHI 34
+#define R_ALPHA_DTPRELLO 35
+#define R_ALPHA_DTPREL16 36
+#define R_ALPHA_GOTTPREL 37
+#define R_ALPHA_TPREL64 38
+#define R_ALPHA_TPRELHI 39
+#define R_ALPHA_TPRELLO 40
+#define R_ALPHA_TPREL16 41
+/* Keep this the last entry. */
+#define R_ALPHA_NUM 46
+
+/* Magic values of the LITUSE relocation addend. */
+#define LITUSE_ALPHA_ADDR 0
+#define LITUSE_ALPHA_BASE 1
+#define LITUSE_ALPHA_BYTOFF 2
+#define LITUSE_ALPHA_JSR 3
+#define LITUSE_ALPHA_TLS_GD 4
+#define LITUSE_ALPHA_TLS_LDM 5
+
+
+/* PowerPC specific declarations */
+
+/* Values for Elf32/64_Ehdr.e_flags. */
+#define EF_PPC_EMB 0x80000000 /* PowerPC embedded flag */
+
+/* Cygnus local bits below */
+#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/
+#define EF_PPC_RELOCATABLE_LIB 0x00008000 /* PowerPC -mrelocatable-lib
+ flag */
+
+/* PowerPC relocations defined by the ABIs */
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/* PowerPC relocations defined for the TLS access ABI. */
+#define R_PPC_TLS 67 /* none (sym+add)@tls */
+#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
+#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
+#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
+#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
+#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
+#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
+#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
+#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
+#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
+
+/* Keep this the last entry. */
+#define R_PPC_NUM 95
+
+/* The remaining relocs are from the Embedded ELF ABI, and are not
+ in the SVR4 ELF ABI. */
+#define R_PPC_EMB_NADDR32 101
+#define R_PPC_EMB_NADDR16 102
+#define R_PPC_EMB_NADDR16_LO 103
+#define R_PPC_EMB_NADDR16_HI 104
+#define R_PPC_EMB_NADDR16_HA 105
+#define R_PPC_EMB_SDAI16 106
+#define R_PPC_EMB_SDA2I16 107
+#define R_PPC_EMB_SDA2REL 108
+#define R_PPC_EMB_SDA21 109 /* 16 bit offset in SDA */
+#define R_PPC_EMB_MRKREF 110
+#define R_PPC_EMB_RELSEC16 111
+#define R_PPC_EMB_RELST_LO 112
+#define R_PPC_EMB_RELST_HI 113
+#define R_PPC_EMB_RELST_HA 114
+#define R_PPC_EMB_BIT_FLD 115
+#define R_PPC_EMB_RELSDA 116 /* 16 bit relative offset in SDA */
+
+/* Diab tool relocations. */
+#define R_PPC_DIAB_SDA21_LO 180 /* like EMB_SDA21, but lower 16 bit */
+#define R_PPC_DIAB_SDA21_HI 181 /* like EMB_SDA21, but high 16 bit */
+#define R_PPC_DIAB_SDA21_HA 182 /* like EMB_SDA21, adjusted high 16 */
+#define R_PPC_DIAB_RELSDA_LO 183 /* like EMB_RELSDA, but lower 16 bit */
+#define R_PPC_DIAB_RELSDA_HI 184 /* like EMB_RELSDA, but high 16 bit */
+#define R_PPC_DIAB_RELSDA_HA 185 /* like EMB_RELSDA, adjusted high 16 */
+
+/* This is a phony reloc to handle any old fashioned TOC16 references
+ that may still be in object files. */
+#define R_PPC_TOC16 255
+
+
+/* PowerPC64 relocations defined by the ABIs */
+#define R_PPC64_NONE R_PPC_NONE
+#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address */
+#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned */
+#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address */
+#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of address */
+#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of address. */
+#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
+#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned */
+#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
+#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
+#define R_PPC64_REL24 R_PPC_REL24 /* PC-rel. 26 bit, word aligned */
+#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit */
+#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
+#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
+#define R_PPC64_GOT16 R_PPC_GOT16
+#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
+#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
+#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
+
+#define R_PPC64_COPY R_PPC_COPY
+#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
+#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
+#define R_PPC64_RELATIVE R_PPC_RELATIVE
+
+#define R_PPC64_UADDR32 R_PPC_UADDR32
+#define R_PPC64_UADDR16 R_PPC_UADDR16
+#define R_PPC64_REL32 R_PPC_REL32
+#define R_PPC64_PLT32 R_PPC_PLT32
+#define R_PPC64_PLTREL32 R_PPC_PLTREL32
+#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
+#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
+#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
+
+#define R_PPC64_SECTOFF R_PPC_SECTOFF
+#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
+#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
+#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
+#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2 */
+#define R_PPC64_ADDR64 38 /* doubleword64 S + A */
+#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A) */
+#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A) */
+#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A) */
+#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A) */
+#define R_PPC64_UADDR64 43 /* doubleword64 S + A */
+#define R_PPC64_REL64 44 /* doubleword64 S + A - P */
+#define R_PPC64_PLT64 45 /* doubleword64 L + A */
+#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P */
+#define R_PPC64_TOC16 47 /* half16* S + A - .TOC */
+#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.) */
+#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.) */
+#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.) */
+#define R_PPC64_TOC 51 /* doubleword64 .TOC */
+#define R_PPC64_PLTGOT16 52 /* half16* M + A */
+#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A) */
+#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A) */
+#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A) */
+
+#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2 */
+#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2 */
+#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2 */
+#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2 */
+#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2 */
+#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2 */
+#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2 */
+#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2 */
+#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2 */
+#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2 */
+#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2 */
+
+/* PowerPC64 relocations defined for the TLS access ABI. */
+#define R_PPC64_TLS 67 /* none (sym+add)@tls */
+#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
+#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
+#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
+#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
+#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
+#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
+#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
+#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
+#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
+#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
+#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
+#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
+#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
+#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
+#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
+#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
+#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
+
+/* Keep this the last entry. */
+#define R_PPC64_NUM 107
+
+/* PowerPC64 specific values for the Dyn d_tag field. */
+#define DT_PPC64_GLINK (DT_LOPROC + 0)
+#define DT_PPC64_NUM 1
+
+
+/* ARM specific declarations */
+
+/* Processor specific flags for the ELF header e_flags field. */
+#define EF_ARM_RELEXEC 0x01
+#define EF_ARM_HASENTRY 0x02
+#define EF_ARM_INTERWORK 0x04
+#define EF_ARM_APCS_26 0x08
+#define EF_ARM_APCS_FLOAT 0x10
+#define EF_ARM_PIC 0x20
+#define EF_ARM_ALIGN8 0x40 /* 8-bit structure alignment is in use */
+#define EF_ARM_NEW_ABI 0x80
+#define EF_ARM_OLD_ABI 0x100
+
+/* Other constants defined in the ARM ELF spec. version B-01. */
+/* NB. These conflict with values defined above. */
+#define EF_ARM_SYMSARESORTED 0x04
+#define EF_ARM_DYNSYMSUSESEGIDX 0x08
+#define EF_ARM_MAPSYMSFIRST 0x10
+#define EF_ARM_EABIMASK 0XFF000000
+
+#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+
+/* Additional symbol types for Thumb */
+#define STT_ARM_TFUNC 0xd
+
+/* ARM-specific values for sh_flags */
+#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */
+#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined
+ in the input to a link step */
+
+/* ARM-specific program header flags */
+#define PF_ARM_SB 0x10000000 /* Segment contains the location
+ addressed by the static base */
+
+/* ARM relocs. */
+#define R_ARM_NONE 0 /* No reloc */
+#define R_ARM_PC24 1 /* PC relative 26 bit branch */
+#define R_ARM_ABS32 2 /* Direct 32 bit */
+#define R_ARM_REL32 3 /* PC relative 32 bit */
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5 /* Direct 16 bit */
+#define R_ARM_ABS12 6 /* Direct 12 bit */
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8 /* Direct 8 bit */
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+#define R_ARM_COPY 20 /* Copy symbol at runtime */
+#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
+#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
+#define R_ARM_RELATIVE 23 /* Adjust by program base */
+#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
+#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
+#define R_ARM_GOT32 26 /* 32 bit GOT entry */
+#define R_ARM_PLT32 27 /* 32 bit PLT address */
+#define R_ARM_ALU_PCREL_7_0 32
+#define R_ARM_ALU_PCREL_15_8 33
+#define R_ARM_ALU_PCREL_23_15 34
+#define R_ARM_LDR_SBREL_11_0 35
+#define R_ARM_ALU_SBREL_19_12 36
+#define R_ARM_ALU_SBREL_27_20 37
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
+#define R_ARM_THM_PC9 103 /* thumb conditional branch */
+#define R_ARM_RXPC25 249
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS22 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
+/* Keep this the last entry. */
+#define R_ARM_NUM 256
+
+/* IA-64 specific declarations. */
+
+/* Processor specific flags for the Ehdr e_flags field. */
+#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
+#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
+#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
+
+/* Processor specific values for the Phdr p_type field. */
+#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
+#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
+
+/* Processor specific flags for the Phdr p_flags field. */
+#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
+
+/* Processor specific values for the Shdr sh_type field. */
+#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
+#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
+
+/* Processor specific flags for the Shdr sh_flags field. */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
+
+/* Processor specific values for the Dyn d_tag field. */
+#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
+#define DT_IA_64_NUM 1
+
+/* IA-64 relocations. */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* copy relocation */
+#define R_IA64_SUB 0x85 /* Addend and symbol difference */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+
+/* SH specific declarations */
+
+/* SH relocs. */
+#define R_SH_NONE 0
+#define R_SH_DIR32 1
+#define R_SH_REL32 2
+#define R_SH_DIR8WPN 3
+#define R_SH_IND12W 4
+#define R_SH_DIR8WPL 5
+#define R_SH_DIR8WPZ 6
+#define R_SH_DIR8BP 7
+#define R_SH_DIR8W 8
+#define R_SH_DIR8L 9
+#define R_SH_SWITCH16 25
+#define R_SH_SWITCH32 26
+#define R_SH_USES 27
+#define R_SH_COUNT 28
+#define R_SH_ALIGN 29
+#define R_SH_CODE 30
+#define R_SH_DATA 31
+#define R_SH_LABEL 32
+#define R_SH_SWITCH8 33
+#define R_SH_GNU_VTINHERIT 34
+#define R_SH_GNU_VTENTRY 35
+#define R_SH_TLS_GD_32 144
+#define R_SH_TLS_LD_32 145
+#define R_SH_TLS_LDO_32 146
+#define R_SH_TLS_IE_32 147
+#define R_SH_TLS_LE_32 148
+#define R_SH_TLS_DTPMOD32 149
+#define R_SH_TLS_DTPOFF32 150
+#define R_SH_TLS_TPOFF32 151
+#define R_SH_GOT32 160
+#define R_SH_PLT32 161
+#define R_SH_COPY 162
+#define R_SH_GLOB_DAT 163
+#define R_SH_JMP_SLOT 164
+#define R_SH_RELATIVE 165
+#define R_SH_GOTOFF 166
+#define R_SH_GOTPC 167
+/* Keep this the last entry. */
+#define R_SH_NUM 256
+
+/* Additional s390 relocs */
+
+#define R_390_NONE 0 /* No reloc. */
+#define R_390_8 1 /* Direct 8 bit. */
+#define R_390_12 2 /* Direct 12 bit. */
+#define R_390_16 3 /* Direct 16 bit. */
+#define R_390_32 4 /* Direct 32 bit. */
+#define R_390_PC32 5 /* PC relative 32 bit. */
+#define R_390_GOT12 6 /* 12 bit GOT offset. */
+#define R_390_GOT32 7 /* 32 bit GOT offset. */
+#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
+#define R_390_COPY 9 /* Copy symbol at runtime. */
+#define R_390_GLOB_DAT 10 /* Create GOT entry. */
+#define R_390_JMP_SLOT 11 /* Create PLT entry. */
+#define R_390_RELATIVE 12 /* Adjust by program base. */
+#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
+#define R_390_GOTPC 14 /* 32 bit PC relative offset to GOT. */
+#define R_390_GOT16 15 /* 16 bit GOT offset. */
+#define R_390_PC16 16 /* PC relative 16 bit. */
+#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
+#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
+#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
+#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
+#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
+#define R_390_64 22 /* Direct 64 bit. */
+#define R_390_PC64 23 /* PC relative 64 bit. */
+#define R_390_GOT64 24 /* 64 bit GOT offset. */
+#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
+#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
+#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
+#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
+#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
+#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
+#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
+#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
+#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general
+ dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local
+ dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
+ thread local data in LE code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
+ thread local data in LE code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
+ block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
+ block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS
+ block. */
+
+/* Keep this the last entry. */
+#define R_390_NUM 57
+
+/* CRIS relocations. */
+#define R_CRIS_NONE 0
+#define R_CRIS_8 1
+#define R_CRIS_16 2
+#define R_CRIS_32 3
+#define R_CRIS_8_PCREL 4
+#define R_CRIS_16_PCREL 5
+#define R_CRIS_32_PCREL 6
+#define R_CRIS_GNU_VTINHERIT 7
+#define R_CRIS_GNU_VTENTRY 8
+#define R_CRIS_COPY 9
+#define R_CRIS_GLOB_DAT 10
+#define R_CRIS_JUMP_SLOT 11
+#define R_CRIS_RELATIVE 12
+#define R_CRIS_16_GOT 13
+#define R_CRIS_32_GOT 14
+#define R_CRIS_16_GOTPLT 15
+#define R_CRIS_32_GOTPLT 16
+#define R_CRIS_32_GOTREL 17
+#define R_CRIS_32_PLT_GOTREL 18
+#define R_CRIS_32_PLT_PCREL 19
+
+#define R_CRIS_NUM 20
+
+/* AMD x86-64 relocations. */
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed PC relative
+ offset to GOT */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
+#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */
+#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */
+#define R_X86_64_TLSGD 19 /* 32 bit signed PC relative offset
+ to two GOT entries for GD symbol */
+#define R_X86_64_TLSLD 20 /* 32 bit signed PC relative offset
+ to two GOT entries for LD symbol */
+#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
+#define R_X86_64_GOTTPOFF 22 /* 32 bit signed PC relative offset
+ to GOT entry for IE symbol */
+#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */
+
+#define R_X86_64_NUM 24
+
+#endif /* elf.h */
diff --git a/viengoos/headers.m4 b/viengoos/headers.m4
new file mode 100644
index 0000000..04f2a7f
--- /dev/null
+++ b/viengoos/headers.m4
@@ -0,0 +1,13 @@
+# headers.m4 - Autoconf snippets to install links for header files.
+# Copyright 2007 Free Software Foundation, Inc.
+# Written by Neal H. Walfield <neal@gnu.org>.
+#
+# This file is free software; as a special exception the author gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+AC_CONFIG_LINKS([include/hurd/rm.h:viengoos/rm.h])
diff --git a/viengoos/ia32-cmain.c b/viengoos/ia32-cmain.c
new file mode 100644
index 0000000..0ea9a5b
--- /dev/null
+++ b/viengoos/ia32-cmain.c
@@ -0,0 +1,146 @@
+/* ia32-cmain.c - Startup code for the ia32.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <alloca.h>
+#include <stdint.h>
+
+#include <hurd/stddef.h>
+
+#include <l4/globals.h>
+#include <l4/init.h>
+#include <l4/stubs.h>
+#include <l4/stubs-init.h>
+
+#include "viengoos.h"
+#include "multiboot.h"
+#include "boot-modules.h"
+
+
+/* Check if the bit BIT in FLAGS is set. */
+#define CHECK_FLAG(flags,bit) ((flags) & (1 << (bit)))
+
+
+/* Initialize libl4, setup the argument vector, and pass control over
+ to the main function. */
+void
+cmain (void)
+{
+ multiboot_info_t *mbi;
+ int argc = 0;
+ char **argv = 0;
+
+ l4_init ();
+ l4_init_stubs ();
+
+ mbi = (multiboot_info_t *) l4_boot_info ();
+ debug (3, "Multiboot Info: %p", mbi);
+
+ if (CHECK_FLAG (mbi->flags, 3) && mbi->mods_count > 0)
+ {
+ /* A command line is available. */
+ module_t *mod = (module_t *) mbi->mods_addr;
+ char *str = (char *) mod[0].string;
+ int nr = 0;
+
+ /* First time around we count the number of arguments. */
+ argc = 1;
+ while (*str && *str == ' ')
+ str++;
+
+ while (*str)
+ if (*(str++) == ' ')
+ {
+ while (*str && *str == ' ')
+ str++;
+ if (*str)
+ argc++;
+ }
+ argv = alloca (sizeof (char *) * (argc + 1));
+
+ /* Second time around we fill in the argv. */
+ str = (char *) mod[0].string;
+
+ while (*str && *str == ' ')
+ str++;
+ argv[nr++] = str;
+
+ while (*str)
+ {
+ if (*str == ' ')
+ {
+ *(str++) = '\0';
+ while (*str && *str == ' ')
+ str++;
+ if (*str)
+ argv[nr++] = str;
+ }
+ else
+ str++;
+ }
+ argv[nr] = 0;
+ }
+ else
+ {
+ argc = 1;
+
+ argv = alloca (sizeof (char *) * 2);
+ argv[0] = (char *) program_name;
+ argv[1] = 0;
+ }
+
+ /* Now invoke the main function. */
+ main (argc, argv);
+
+ /* Never reached. */
+}
+
+void
+find_components (void)
+{
+ multiboot_info_t *mbi = (multiboot_info_t *) l4_boot_info ();
+
+ /* Load the module information. */
+ if (CHECK_FLAG (mbi->flags, 3))
+ {
+ module_t *mod = (module_t *) mbi->mods_addr;
+ /* Skip the entry for the rootserver. */
+ mod++;
+
+ boot_module_count = mbi->mods_count - 1;
+ if (boot_module_count > BOOT_MODULES_MAX)
+ {
+ printf ("WARNING: passed %d modules but only support %d!",
+ boot_module_count, BOOT_MODULES_MAX);
+ boot_module_count = BOOT_MODULES_MAX;
+ }
+
+ int i;
+ for (i = 0; i < boot_module_count; i++)
+ {
+ boot_modules[i].start = mod[i].mod_start;
+ boot_modules[i].end = mod[i].mod_end;
+ boot_modules[i].command_line = (char *) mod[i].string;
+ }
+ }
+}
diff --git a/viengoos/ia32-crt0.S b/viengoos/ia32-crt0.S
new file mode 100644
index 0000000..94cb711
--- /dev/null
+++ b/viengoos/ia32-crt0.S
@@ -0,0 +1,47 @@
+/* ia32-crt0.S - Startup code for ia32.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#define ASM 1
+#include "multiboot.h"
+
+/* The size of our stack (16*4KB). */
+#define STACK_SIZE 0x10000
+
+ .text
+
+ .globl start, _start
+start:
+_start:
+ /* Initialize the stack pointer. */
+ movl $(stack + STACK_SIZE), %esp
+
+ /* Reset EFLAGS. */
+ pushl $0
+ popf
+
+ /* Now enter the cmain function. */
+ call cmain
+
+ /* Not reached. */
+loop: hlt
+ jmp loop
+
+ /* Our stack area. */
+ .comm stack, STACK_SIZE
diff --git a/viengoos/ia32-output.c b/viengoos/ia32-output.c
new file mode 100644
index 0000000..f4ad778
--- /dev/null
+++ b/viengoos/ia32-output.c
@@ -0,0 +1,39 @@
+/* ia32-output.c - The output drivers for the ia32.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "output.h"
+
+
+extern struct output_driver vga_output;
+extern struct output_driver serial_output;
+extern struct output_driver no_output;
+
+/* A list of all output drivers, terminated with a null pointer. */
+struct output_driver *output_drivers[] =
+ {
+ &vga_output,
+ &serial_output,
+ &no_output,
+ 0
+ };
diff --git a/viengoos/ia32-shutdown.c b/viengoos/ia32-shutdown.c
new file mode 100644
index 0000000..9aa9804
--- /dev/null
+++ b/viengoos/ia32-shutdown.c
@@ -0,0 +1,52 @@
+/* ia32-shutdown.c - Shutdown routines for the ia32.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <sys/io.h>
+
+#include "shutdown.h"
+
+
+/* There are three ways to reset an ia32 machine. The first way is to
+ make the corresponding BIOS call in real mode. The second way is
+ to program the keyboard controller to do it. The third way is to
+ triple fault the CPU by using an empty IDT and then causing a
+ fault. Any of these can fail on odd hardware. */
+void
+reset (void)
+{
+ /* We only try to program the keyboard controller. But if that
+ fails, we should try to triple fault. Alternatively, we could
+ also try to make the BIOS call. */
+
+ outb_p (0x80, 0x70);
+ inb_p (0x71);
+
+ while (inb (0x64) & 0x02)
+ ;
+
+ outb_p (0x8F, 0x70);
+ outb_p (0x00, 0x71);
+
+ outb_p (0xFE, 0x64);
+}
diff --git a/viengoos/loader.c b/viengoos/loader.c
new file mode 100644
index 0000000..7b80c51
--- /dev/null
+++ b/viengoos/loader.c
@@ -0,0 +1,164 @@
+/* loader.c - Load ELF files.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann and Neal H. Walfield.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <hurd/stddef.h>
+#include <string.h>
+
+#include "loader.h"
+#include "elf.h"
+
+#include "object.h"
+#include "as.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+void
+loader_elf_load (allocate_object_callback_t alloc,
+ struct activity *activity, struct thread *thread,
+ const char *name, l4_word_t start, l4_word_t end,
+ l4_word_t *entry)
+{
+ Elf32_Ehdr *elf = (Elf32_Ehdr *) start;
+
+ if (elf->e_ident[EI_MAG0] != ELFMAG0
+ || elf->e_ident[EI_MAG1] != ELFMAG1
+ || elf->e_ident[EI_MAG2] != ELFMAG2
+ || elf->e_ident[EI_MAG3] != ELFMAG3)
+ panic ("%s is not an ELF file", name);
+
+ if (elf->e_type != ET_EXEC)
+ panic ("%s is not an executable file", name);
+
+ if (!elf->e_phoff)
+ panic ("%s has no valid program header offset", name);
+
+ /* FIXME: Some architectures support both word sizes. */
+ if (!((elf->e_ident[EI_CLASS] == ELFCLASS32
+ && L4_WORDSIZE == 32)
+ || (elf->e_ident[EI_CLASS] == ELFCLASS64
+ && L4_WORDSIZE == 64)))
+ panic ("%s has invalid word size", name);
+
+ if (!((elf->e_ident[EI_DATA] == ELFDATA2LSB
+ && L4_BYTE_ORDER == L4_LITTLE_ENDIAN)
+ || (elf->e_ident[EI_DATA] == ELFDATA2MSB
+ && L4_BYTE_ORDER == L4_BIG_ENDIAN)))
+ panic ("%s has invalid byte order", name);
+
+#if i386
+# define elf_machine EM_386
+#elif PPC
+# define elf_machine EM_PPC
+#else
+# error Not ported to this architecture!
+#endif
+
+ if (elf->e_machine != elf_machine)
+ panic ("%s is not for this architecture", name);
+
+ /* We have an ELF file. Load it. */
+
+ int i;
+ for (i = 0; i < elf->e_phnum; i++)
+ {
+ Elf32_Phdr *ph = (Elf32_Phdr *) (start + elf->e_phoff
+ + i * elf->e_phentsize);
+ if (ph->p_type != PT_LOAD)
+ continue;
+
+ /* Load this section. */
+
+ l4_word_t addr = ph->p_paddr;
+
+ /* Offset of PH->P_PADDR in the first page. */
+ int offset = ph->p_paddr & (PAGESIZE - 1);
+ if (offset)
+ /* This section does not start on a page aligned address. It
+ may be the case that another section is on this page. If
+ so, don't allocate a new page but use the existing one. */
+ {
+ addr_t loc = ADDR (addr - offset, ADDR_BITS - PAGESIZE_LOG2);
+
+ struct object *page = NULL;
+ struct cap cap = object_lookup_rel (activity, &thread->aspace, loc,
+ cap_rpage, NULL);
+ if (cap.type != cap_void)
+ page = cap_to_object (activity, &cap);
+
+ if (! page)
+ {
+ struct cap cap = alloc (cap_page, loc).cap;
+ page = cap_to_object (activity, &cap);
+ as_insert (activity, &thread->aspace, loc,
+ object_to_cap (page), ADDR_VOID, alloc);
+ }
+
+ /* Copy the data that belongs on the first page. */
+ memcpy ((void *) page + offset,
+ (void *) start + ph->p_offset,
+ MIN (PAGESIZE - offset, ph->p_filesz));
+
+ addr = addr - offset + PAGESIZE;
+ }
+
+ /* We know process the section a page at a time. */
+ assert ((addr & (PAGESIZE - 1)) == 0);
+ for (; addr < ph->p_paddr + ph->p_memsz; addr += PAGESIZE)
+ {
+ addr_t loc = ADDR (addr, ADDR_BITS - PAGESIZE_LOG2);
+
+ /* Allocate a page. */
+ struct object *page = NULL;
+
+ if (ph->p_paddr + ph->p_memsz < addr + PAGESIZE)
+ /* We have less than a page of data to process. Another
+ section could have written data to the end of this
+ page. See if such a page has already been
+ allocated. */
+ {
+ struct cap cap = object_lookup_rel (activity,
+ &thread->aspace, loc,
+ cap_rpage, NULL);
+ if (cap.type != cap_void)
+ page = cap_to_object (activity, &cap);
+ }
+
+ if (! page)
+ {
+ struct cap cap = alloc (cap_page, loc).cap;
+ page = cap_to_object (activity, &cap);
+ as_insert (activity, &thread->aspace, loc,
+ object_to_cap (page), ADDR_VOID, alloc);
+ }
+
+ if (addr < ph->p_paddr + ph->p_filesz)
+ memcpy ((void *) page,
+ (void *) start + ph->p_offset + (addr - ph->p_paddr),
+ MIN (PAGESIZE, ph->p_paddr + ph->p_filesz - addr));
+ }
+ }
+
+ if (entry)
+ *entry = elf->e_entry;
+}
diff --git a/viengoos/loader.h b/viengoos/loader.h
new file mode 100644
index 0000000..9692edb
--- /dev/null
+++ b/viengoos/loader.h
@@ -0,0 +1,39 @@
+/* loader.h - Load ELF binary images, interfaces.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_LOADER_H
+#define RM_LOADER_H 1
+
+#include <l4/types.h>
+
+#include "cap.h"
+#include "activity.h"
+#include "thread.h"
+#include "as.h"
+
+/* Load the ELF image from START to END into memory under the name
+ NAME (also used as the name for the region of the resulting ELF
+ program). Return the entry point in ENTRY. */
+extern void loader_elf_load (allocate_object_callback_t alloc,
+ struct activity *activity, struct thread *thread,
+ const char *name, l4_word_t start, l4_word_t end,
+ l4_word_t *entry);
+
+#endif /* RM_LOADER_H */
diff --git a/viengoos/malloc-wrap.c b/viengoos/malloc-wrap.c
new file mode 100644
index 0000000..8ae7c46
--- /dev/null
+++ b/viengoos/malloc-wrap.c
@@ -0,0 +1,65 @@
+/* malloc-wrap.c - Doug Lea's malloc for the physical memory server.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+/* Configuration of Doug Lea's malloc. */
+
+#include <errno.h>
+
+#include <l4.h>
+
+#include "viengoos.h"
+
+#include <hurd/stddef.h>
+#define ABORT panic("ai!")
+
+#define ABORT_ON_ASSERT_FAILURE 0
+
+#define __STD_C 1
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_FCNTL_H
+#define LACKS_SYS_TYPES_H
+typedef l4_word_t size_t;
+#define LACKS_STDLIB_H
+#define LACKS_STDIO_H
+
+/* We want to use optimized versions of memset and memcpy. */
+#define HAVE_MEMCPY
+
+/* We always use the supplied mmap emulation. */
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+#define HAVE_MREMAP 0
+#define MMAP_CLEARS 1
+#define malloc_getpagesize PAGESIZE
+#define MMAP_AS_MORECORE_SIZE (16 * malloc_getpagesize)
+#define DEFAULT_MMAP_THRESHOLD (4 * malloc_getpagesize)
+#define USE_MALLOC_LOCK 1
+
+/* Suppress debug output in mstats(). */
+#define fprintf(...)
+
+/* Now include Doug Lea's malloc. */
+#include "malloc.c"
diff --git a/viengoos/malloc.c b/viengoos/malloc.c
new file mode 100644
index 0000000..6677730
--- /dev/null
+++ b/viengoos/malloc.c
@@ -0,0 +1,5067 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain, as explained at
+ http://creativecommons.org/licenses/publicdomain. Send questions,
+ comments, complaints, performance data, etc to dl@cs.oswego.edu
+
+* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+* Quickstart
+
+ This library is all in one file to simplify the most common usage:
+ ftp it, compile it (-O3), and link it into another program. All of
+ the compile-time options default to reasonable values for use on
+ most platforms. You might later want to step through various
+ compile-time and dynamic tuning options.
+
+ For convenience, an include file for code using this malloc is at:
+ ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
+ You don't really need this .h file unless you call functions not
+ defined in your system include files. The .h file contains only the
+ excerpts from this file needed for using this malloc on ANSI C/C++
+ systems, so long as you haven't changed compile-time options about
+ naming and tuning parameters. If you do, then you can create your
+ own malloc.h that does include all settings by cutting at the point
+ indicated below. Note that you may already by default be using a C
+ library containing a malloc that is based on some version of this
+ malloc (for example in linux). You might still want to use the one
+ in this file to customize settings or to avoid overheads associated
+ with library versions.
+
+* Vital statistics:
+
+ Supported pointer/size_t representation: 4 or 8 bytes
+ size_t MUST be an unsigned type of the same width as
+ pointers. (If you are using an ancient system that declares
+ size_t as a signed type, or need it to be a different width
+ than pointers, you can use a previous release of this malloc
+ (e.g. 2.7.2) supporting these.)
+
+ Alignment: 8 bytes (default)
+ This suffices for nearly all current machines and C compilers.
+ However, you can define MALLOC_ALIGNMENT to be wider than this
+ if necessary (up to 128bytes), at the expense of using more space.
+
+ Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
+ 8 or 16 bytes (if 8byte sizes)
+ Each malloced chunk has a hidden word of overhead holding size
+ and status information, and additional cross-check word
+ if FOOTERS is defined.
+
+ Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
+ 8-byte ptrs: 32 bytes (including overhead)
+
+ Even a request for zero bytes (i.e., malloc(0)) returns a
+ pointer to something of the minimum allocatable size.
+ The maximum overhead wastage (i.e., number of extra bytes
+ allocated than were requested in malloc) is less than or equal
+ to the minimum size, except for requests >= mmap_threshold that
+ are serviced via mmap(), where the worst case wastage is about
+ 32 bytes plus the remainder from a system page (the minimal
+ mmap unit); typically 4096 or 8192 bytes.
+
+ Security: static-safe; optionally more or less
+ The "security" of malloc refers to the ability of malicious
+ code to accentuate the effects of errors (for example, freeing
+ space that is not currently malloc'ed or overwriting past the
+ ends of chunks) in code that calls malloc. This malloc
+ guarantees not to modify any memory locations below the base of
+ heap, i.e., static variables, even in the presence of usage
+ errors. The routines additionally detect most improper frees
+ and reallocs. All this holds as long as the static bookkeeping
+ for malloc itself is not corrupted by some other means. This
+ is only one aspect of security -- these checks do not, and
+ cannot, detect all possible programming errors.
+
+ If FOOTERS is defined nonzero, then each allocated chunk
+ carries an additional check word to verify that it was malloced
+ from its space. These check words are the same within each
+ execution of a program using malloc, but differ across
+ executions, so externally crafted fake chunks cannot be
+ freed. This improves security by rejecting frees/reallocs that
+ could corrupt heap memory, in addition to the checks preventing
+ writes to statics that are always on. This may further improve
+ security at the expense of time and space overhead. (Note that
+ FOOTERS may also be worth using with MSPACES.)
+
+ By default detected errors cause the program to abort (calling
+ "abort()"). You can override this to instead proceed past
+ errors by defining PROCEED_ON_ERROR. In this case, a bad free
+ has no effect, and a malloc that encounters a bad address
+ caused by user overwrites will ignore the bad address by
+ dropping pointers and indices to all known memory. This may
+ be appropriate for programs that should continue if at all
+ possible in the face of programming errors, although they may
+ run out of memory because dropped memory is never reclaimed.
+
+ If you don't like either of these options, you can define
+ CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
+ else. And if you are sure that your program using malloc has
+ no errors or vulnerabilities, you can define INSECURE to 1,
+ which might (or might not) provide a small performance improvement.
+
+ Thread-safety: NOT thread-safe unless USE_LOCKS defined
+ When USE_LOCKS is defined, each public call to malloc, free,
+ etc is surrounded with either a pthread mutex or a win32
+ spinlock (depending on WIN32). This is not especially fast, and
+ can be a major bottleneck. It is designed only to provide
+ minimal protection in concurrent environments, and to provide a
+ basis for extensions. If you are using malloc in a concurrent
+ program, consider instead using ptmalloc, which is derived from
+ a version of this malloc. (See http://www.malloc.de).
+
+ System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
+ This malloc can use unix sbrk or any emulation (invoked using
+ the CALL_MORECORE macro) and/or mmap/munmap or any emulation
+ (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
+ memory. On most unix systems, it tends to work best if both
+ MORECORE and MMAP are enabled. On Win32, it uses emulations
+ based on VirtualAlloc. It also uses common C library functions
+ like memset.
+
+ Compliance: I believe it is compliant with the Single Unix Specification
+ (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
+ others as well.
+
+* Overview of algorithms
+
+ This is not the fastest, most space-conserving, most portable, or
+ most tunable malloc ever written. However it is among the fastest
+ while also being among the most space-conserving, portable and
+ tunable. Consistent balance across these factors results in a good
+ general-purpose allocator for malloc-intensive programs.
+
+ In most ways, this malloc is a best-fit allocator. Generally, it
+ chooses the best-fitting existing chunk for a request, with ties
+ broken in approximately least-recently-used order. (This strategy
+ normally maintains low fragmentation.) However, for requests less
+ than 256bytes, it deviates from best-fit when there is not an
+ exactly fitting available chunk by preferring to use space adjacent
+ to that used for the previous small request, as well as by breaking
+ ties in approximately most-recently-used order. (These enhance
+ locality of series of small allocations.) And for very large requests
+ (>= 256Kb by default), it relies on system memory mapping
+ facilities, if supported. (This helps avoid carrying around and
+ possibly fragmenting memory used only for large chunks.)
+
+ All operations (except malloc_stats and mallinfo) have execution
+ times that are bounded by a constant factor of the number of bits in
+ a size_t, not counting any clearing in calloc or copying in realloc,
+ or actions surrounding MORECORE and MMAP that have times
+ proportional to the number of non-contiguous regions returned by
+ system allocation routines, which is often just 1.
+
+ The implementation is not very modular and seriously overuses
+ macros. Perhaps someday all C compilers will do as good a job
+ inlining modular code as can now be done by brute-force expansion,
+ but now, enough of them seem not to.
+
+ Some compilers issue a lot of warnings about code that is
+ dead/unreachable only on some platforms, and also about intentional
+ uses of negation on unsigned types. All known cases of each can be
+ ignored.
+
+ For a longer but out of date high-level description, see
+ http://gee.cs.oswego.edu/dl/html/malloc.html
+
+* MSPACES
+ If MSPACES is defined, then in addition to malloc, free, etc.,
+ this file also defines mspace_malloc, mspace_free, etc. These
+ are versions of malloc routines that take an "mspace" argument
+ obtained using create_mspace, to control all internal bookkeeping.
+ If ONLY_MSPACES is defined, only these versions are compiled.
+ So if you would like to use this allocator for only some allocations,
+ and your system malloc for others, you can compile with
+ ONLY_MSPACES and then do something like...
+ static mspace mymspace = create_mspace(0,0); // for example
+ #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
+
+ (Note: If you only need one instance of an mspace, you can instead
+ use "USE_DL_PREFIX" to relabel the global malloc.)
+
+ You can similarly create thread-local allocators by storing
+ mspaces as thread-locals. For example:
+ static __thread mspace tlms = 0;
+ void* tlmalloc(size_t bytes) {
+ if (tlms == 0) tlms = create_mspace(0, 0);
+ return mspace_malloc(tlms, bytes);
+ }
+ void tlfree(void* mem) { mspace_free(tlms, mem); }
+
+ Unless FOOTERS is defined, each mspace is completely independent.
+ You cannot allocate from one and free to another (although
+ conformance is only weakly checked, so usage errors are not always
+ caught). If FOOTERS is defined, then each chunk carries around a tag
+ indicating its originating mspace, and frees are directed to their
+ originating spaces.
+
+ ------------------------- Compile-time options ---------------------------
+
+Be careful in setting #define values for numerical constants of type
+size_t. On some systems, literal values are not automatically extended
+to size_t precision unless they are explicitly casted.
+
+WIN32 default: defined if _WIN32 defined
+ Defining WIN32 sets up defaults for MS environment and compilers.
+ Otherwise defaults are for unix.
+
+MALLOC_ALIGNMENT default: (size_t)8
+ Controls the minimum alignment for malloc'ed chunks. It must be a
+ power of two and at least 8, even on machines for which smaller
+ alignments would suffice. It may be defined as larger than this
+ though. Note however that code and data structures are optimized for
+ the case of 8-byte alignment.
+
+MSPACES default: 0 (false)
+ If true, compile in support for independent allocation spaces.
+ This is only supported if HAVE_MMAP is true.
+
+ONLY_MSPACES default: 0 (false)
+ If true, only compile in mspace versions, not regular versions.
+
+USE_LOCKS default: 0 (false)
+ Causes each call to each public routine to be surrounded with
+ pthread or WIN32 mutex lock/unlock. (If set true, this can be
+ overridden on a per-mspace basis for mspace versions.)
+
+FOOTERS default: 0
+ If true, provide extra checking and dispatching by placing
+ information in the footers of allocated chunks. This adds
+ space and time overhead.
+
+INSECURE default: 0
+ If true, omit checks for usage errors and heap space overwrites.
+
+USE_DL_PREFIX default: NOT defined
+ Causes compiler to prefix all public routines with the string 'dl'.
+ This can be useful when you only want to use this malloc in one part
+ of a program, using your regular system malloc elsewhere.
+
+ABORT default: defined as abort()
+ Defines how to abort on failed checks. On most systems, a failed
+ check cannot die with an "assert" or even print an informative
+ message, because the underlying print routines in turn call malloc,
+ which will fail again. Generally, the best policy is to simply call
+ abort(). It's not very useful to do more than this because many
+ errors due to overwriting will show up as address faults (null, odd
+ addresses etc) rather than malloc-triggered checks, so will also
+ abort. Also, most compilers know that abort() does not return, so
+ can better optimize code conditionally calling it.
+
+PROCEED_ON_ERROR default: defined as 0 (false)
+ Controls whether detected bad addresses cause them to be bypassed
+ rather than aborting. If set, detected bad arguments to free and
+ realloc are ignored. And all bookkeeping information is zeroed out
+ upon a detected overwrite of freed heap space, thus losing the
+ ability to ever return it from malloc again, but enabling the
+ application to proceed. If PROCEED_ON_ERROR is defined, the
+ static variable malloc_corruption_error_count is compiled in
+ and can be examined to see if errors have occurred. This option
+ generates slower code than the default abort policy.
+
+DEBUG default: NOT defined
+ The DEBUG setting is mainly intended for people trying to modify
+ this code or diagnose problems when porting to new platforms.
+ However, it may also be able to better isolate user errors than just
+ using runtime checks. The assertions in the check routines spell
+ out in more detail the assumptions and invariants underlying the
+ algorithms. The checking is fairly extensive, and will slow down
+ execution noticeably. Calling malloc_stats or mallinfo with DEBUG
+ set will attempt to check every non-mmapped allocated and free chunk
+ in the course of computing the summaries.
+
+ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
+ Debugging assertion failures can be nearly impossible if your
+ version of the assert macro causes malloc to be called, which will
+ lead to a cascade of further failures, blowing the runtime stack.
+ ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
+ which will usually make debugging easier.
+
+MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
+ The action to take before "return 0" when malloc fails to be able to
+ return memory because there is none available.
+
+HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
+ True if this system supports sbrk or an emulation of it.
+
+MORECORE default: sbrk
+ The name of the sbrk-style system routine to call to obtain more
+ memory. See below for guidance on writing custom MORECORE
+ functions. The type of the argument to sbrk/MORECORE varies across
+ systems. It cannot be size_t, because it supports negative
+ arguments, so it is normally the signed type of the same width as
+ size_t (sometimes declared as "intptr_t"). It doesn't much matter
+ though. Internally, we only call it with arguments less than half
+ the max value of a size_t, which should work across all reasonable
+ possibilities, although sometimes generating compiler warnings. See
+ near the end of this file for guidelines for creating a custom
+ version of MORECORE.
+
+MORECORE_CONTIGUOUS default: 1 (true)
+ If true, take advantage of fact that consecutive calls to MORECORE
+ with positive arguments always return contiguous increasing
+ addresses. This is true of unix sbrk. It does not hurt too much to
+ set it true anyway, since malloc copes with non-contiguities.
+ Setting it false when definitely non-contiguous saves time
+ and possibly wasted space it would take to discover this though.
+
+MORECORE_CANNOT_TRIM default: NOT defined
+ True if MORECORE cannot release space back to the system when given
+ negative arguments. This is generally necessary only if you are
+ using a hand-crafted MORECORE function that cannot handle negative
+ arguments.
+
+HAVE_MMAP default: 1 (true)
+ True if this system supports mmap or an emulation of it. If so, and
+ HAVE_MORECORE is not true, MMAP is used for all system
+ allocation. If set and HAVE_MORECORE is true as well, MMAP is
+ primarily used to directly allocate very large blocks. It is also
+ used as a backup strategy in cases where MORECORE fails to provide
+ space from system. Note: A single call to MUNMAP is assumed to be
+ able to unmap memory that may have been allocated using multiple calls
+ to MMAP, so long as they are adjacent.
+
+HAVE_MREMAP default: 1 on linux, else 0
+ If true realloc() uses mremap() to re-allocate large blocks and
+ extend or shrink allocation spaces.
+
+MMAP_CLEARS default: 1 on unix
+ True if mmap clears memory so calloc doesn't need to. This is true
+ for standard unix mmap using /dev/zero.
+
+USE_BUILTIN_FFS default: 0 (i.e., not used)
+ Causes malloc to use the builtin ffs() function to compute indices.
+ Some compilers may recognize and intrinsify ffs to be faster than the
+ supplied C version. Also, the case of x86 using gcc is special-cased
+ to an asm instruction, so is already as fast as it can be, and so
+ this setting has no effect. (On most x86s, the asm version is only
+ slightly faster than the C version.)
+
+malloc_getpagesize default: derive from system includes, or 4096.
+ The system page size. To the extent possible, this malloc manages
+ memory from the system in page-size units. This may be (and
+ usually is) a function rather than a constant. This is ignored
+ if WIN32, where page size is determined using getSystemInfo during
+ initialization.
+
+USE_DEV_RANDOM default: 0 (i.e., not used)
+ Causes malloc to use /dev/random to initialize secure magic seed for
+ stamping footers. Otherwise, the current time is used.
+
+NO_MALLINFO default: 0
+ If defined, don't compile "mallinfo". This can be a simple way
+ of dealing with mismatches between system declarations and
+ those in this file.
+
+MALLINFO_FIELD_TYPE default: size_t
+ The type of the fields in the mallinfo struct. This was originally
+ defined as "int" in SVID etc, but is more usefully defined as
+ size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
+
+REALLOC_ZERO_BYTES_FREES default: not defined
+ This should be set if a call to realloc with zero bytes should
+ be the same as a call to free. Some people think it should. Otherwise,
+ since this malloc returns a unique pointer for malloc(0), so does
+ realloc(p, 0).
+
+LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
+LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
+LACKS_STDLIB_H default: NOT defined unless on WIN32
+ Define these if your system does not have these header files.
+ You might need to manually insert some of the declarations they provide.
+
+LACKS_STDIO_H default: NOT defined
+ Define this if your system does not have this header file.
+ You might need to manually insert some of the declarations it provides.
+
+DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
+ system_info.dwAllocationGranularity in WIN32,
+ otherwise 64K.
+ Also settable using mallopt(M_GRANULARITY, x)
+ The unit for allocating and deallocating memory from the system. On
+ most systems with contiguous MORECORE, there is no reason to
+ make this more than a page. However, systems with MMAP tend to
+ either require or encourage larger granularities. You can increase
+ this value to prevent system allocation functions from being called so
+ often, especially if they are slow. The value must be at least one
+ page and must be a power of two. Setting to 0 causes initialization
+ to either page size or win32 region size. (Note: In previous
+ versions of malloc, the equivalent of this option was called
+ "TOP_PAD")
+
+DEFAULT_TRIM_THRESHOLD default: 2MB
+ Also settable using mallopt(M_TRIM_THRESHOLD, x)
+ The maximum amount of unused top-most memory to keep before
+ releasing via malloc_trim in free(). Automatic trimming is mainly
+ useful in long-lived programs using contiguous MORECORE. Because
+ trimming via sbrk can be slow on some systems, and can sometimes be
+ wasteful (in cases where programs immediately afterward allocate
+ more large chunks) the value should be high enough so that your
+ overall system performance would improve by releasing this much
+ memory. As a rough guide, you might set to a value close to the
+ average size of a process (program) running on your system.
+ Releasing this much memory would allow such a process to run in
+ memory. Generally, it is worth tuning trim thresholds when a
+ program undergoes phases where several large chunks are allocated
+ and released in ways that can reuse each other's storage, perhaps
+ mixed with phases where there are no such chunks at all. The trim
+ value must be greater than page size to have any useful effect. To
+ disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
+ some people use of mallocing a huge space and then freeing it at
+ program startup, in an attempt to reserve system memory, doesn't
+ have the intended effect under automatic trimming, since that memory
+ will immediately be returned to the system.
+
+DEFAULT_MMAP_THRESHOLD default: 256K
+ Also settable using mallopt(M_MMAP_THRESHOLD, x)
+ The request size threshold for using MMAP to directly service a
+ request. Requests of at least this size that cannot be allocated
+ using already-existing space will be serviced via mmap. (If enough
+ normal freed space already exists it is used instead.) Using mmap
+ segregates relatively large chunks of memory so that they can be
+ individually obtained and released from the host system. A request
+ serviced through mmap is never reused by any other request (at least
+ not directly; the system may just so happen to remap successive
+ requests to the same locations). Segregating space in this way has
+ the benefits that: Mmapped space can always be individually released
+ back to the system, which helps keep the system level memory demands
+ of a long-lived program low. Also, mapped memory doesn't become
+ `locked' between other chunks, as can happen with normally allocated
+ chunks, which means that even trimming via malloc_trim would not
+ release them. However, it has the disadvantage that the space
+ cannot be reclaimed, consolidated, and then used to service later
+ requests, as happens with normal chunks. The advantages of mmap
+ nearly always outweigh disadvantages for "large" chunks, but the
+ value of "large" may vary across systems. The default is an
+ empirically derived value that works well in most systems. You can
+ disable mmap by setting to MAX_SIZE_T.
+
+*/
+
+#ifndef WIN32
+#ifdef _WIN32
+#define WIN32 1
+#endif /* _WIN32 */
+#endif /* WIN32 */
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define MALLOC_FAILURE_ACTION
+#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
+#endif /* WIN32 */
+
+#if defined(DARWIN) || defined(_DARWIN)
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+#endif /* HAVE_MORECORE */
+#endif /* DARWIN */
+
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h> /* For size_t */
+#endif /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef ONLY_MSPACES
+#define ONLY_MSPACES 0
+#endif /* ONLY_MSPACES */
+#ifndef MSPACES
+#if ONLY_MSPACES
+#define MSPACES 1
+#else /* ONLY_MSPACES */
+#define MSPACES 0
+#endif /* ONLY_MSPACES */
+#endif /* MSPACES */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)8U)
+#endif /* MALLOC_ALIGNMENT */
+#ifndef FOOTERS
+#define FOOTERS 0
+#endif /* FOOTERS */
+#ifndef ABORT
+#define ABORT abort()
+#endif /* ABORT */
+#ifndef ABORT_ON_ASSERT_FAILURE
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#ifndef PROCEED_ON_ERROR
+#define PROCEED_ON_ERROR 0
+#endif /* PROCEED_ON_ERROR */
+#ifndef USE_LOCKS
+#define USE_LOCKS 0
+#endif /* USE_LOCKS */
+#ifndef INSECURE
+#define INSECURE 0
+#endif /* INSECURE */
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 1
+#endif /* HAVE_MMAP */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 1
+#endif /* MMAP_CLEARS */
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#else /* linux */
+#define HAVE_MREMAP 0
+#endif /* linux */
+#endif /* HAVE_MREMAP */
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION errno = ENOMEM;
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef HAVE_MORECORE
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif /* ONLY_MSPACES */
+#endif /* HAVE_MORECORE */
+#if !HAVE_MORECORE
+#define MORECORE_CONTIGUOUS 0
+#else /* !HAVE_MORECORE */
+#ifndef MORECORE
+#define MORECORE sbrk
+#endif /* MORECORE */
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* HAVE_MORECORE */
+#ifndef DEFAULT_GRANULARITY
+#if MORECORE_CONTIGUOUS
+#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
+#else /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* DEFAULT_GRANULARITY */
+#ifndef DEFAULT_TRIM_THRESHOLD
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif /* MORECORE_CANNOT_TRIM */
+#endif /* DEFAULT_TRIM_THRESHOLD */
+#ifndef DEFAULT_MMAP_THRESHOLD
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+#else /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* DEFAULT_MMAP_THRESHOLD */
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 0
+#endif /* USE_BUILTIN_FFS */
+#ifndef USE_DEV_RANDOM
+#define USE_DEV_RANDOM 0
+#endif /* USE_DEV_RANDOM */
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+
+/*
+ mallopt tuning options. SVID/XPG defines four standard parameter
+ numbers for mallopt, normally defined in malloc.h. None of these
+ are used in this malloc, so setting them has no effect. But this
+ malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD (-1)
+#define M_GRANULARITY (-2)
+#define M_MMAP_THRESHOLD (-3)
+
+/* ------------------------ Mallinfo declarations ------------------------ */
+
+#if !NO_MALLINFO
+/*
+ This version of malloc supports the standard SVID/XPG mallinfo
+ routine that returns a struct containing usage properties and
+ statistics. It should work on any system that has a
+ /usr/include/malloc.h defining struct mallinfo. The main
+ declaration needed is the mallinfo struct that is returned (by-copy)
+ by mallinfo(). The malloinfo struct contains a bunch of fields that
+ are not even meaningful in this version of malloc. These fields
+ are instead filled by mallinfo() with other numbers that might be of
+ interest.
+
+ HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+ /usr/include/malloc.h file that includes a declaration of struct
+ mallinfo. If so, it is included; else a compliant version is
+ declared below. These must be precisely the same for mallinfo() to
+ work. The original SVID version of this struct, defined on most
+ systems with mallinfo, declares all fields as ints. But some others
+ define as unsigned long. If your system defines the fields using a
+ type of different width than listed here, you MUST #include your
+ system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+
+struct mallinfo {
+ MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
+ MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
+ MALLINFO_FIELD_TYPE smblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
+ MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
+ MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+ MALLINFO_FIELD_TYPE fordblks; /* total free space */
+ MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#if !ONLY_MSPACES
+
+/* ------------------- Declarations of public routines ------------------- */
+
+#ifndef USE_DL_PREFIX
+#define dlcalloc calloc
+#define dlfree free
+#define dlmalloc malloc
+#define dlmemalign memalign
+#define dlrealloc realloc
+#define dlvalloc valloc
+#define dlpvalloc pvalloc
+#define dlmallinfo mallinfo
+#define dlmallopt mallopt
+#define dlmalloc_trim malloc_trim
+#define dlmalloc_stats malloc_stats
+#define dlmalloc_usable_size malloc_usable_size
+#define dlmalloc_footprint malloc_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlindependent_calloc independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#endif /* USE_DL_PREFIX */
+
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or
+ null if no space is available, in which case errno is set to ENOMEM
+ on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+ systems.) Note that size_t is an unsigned type, so calls with
+ arguments that would be negative if signed are interpreted as
+ requests for huge amounts of space, which will often fail. The
+ maximum supported value of n differs across systems, but is in all
+ cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+ free(void* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. If p was not malloced or already
+ freed, free(p) will by default cause the current program to abort.
+*/
+void dlfree(void*);
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+ realloc(void* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p in most cases when possible, otherwise it
+ employs the equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ if n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. realloc with a size
+ argument of zero (re)allocates a minimum-sized chunk.
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. None of these are used in this malloc,
+ so setting them has no effect. But this malloc also supports other
+ options in mallopt. See below for details. Briefly, supported
+ parameters are as follows (listed defaults are for "typical"
+ configurations).
+
+ Symbol param # default allowed param values
+ M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
+ M_GRANULARITY -2 page size any power of 2 >= page size
+ M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
+
+/*
+ malloc_footprint();
+ Returns the number of bytes obtained from the system. The total
+ number of bytes allocated by malloc, realloc etc., is less than this
+ value. Unlike mallinfo, this function returns only a precomputed
+ result, so can be called frequently to monitor memory consumption.
+ Even if locks are otherwise defined, this function does not use them,
+ so results might not be up to date.
+*/
+size_t dlmalloc_footprint(void);
+
+/*
+ malloc_max_footprint();
+ Returns the maximum number of bytes obtained from the system. This
+ value will be greater than current footprint if deallocated space
+ has been reclaimed by the system. The peak number of bytes allocated
+ by malloc, realloc etc., is less than this value. Unlike mallinfo,
+ this function returns only a precomputed result, so can be called
+ frequently to monitor memory consumption. Even if locks are
+ otherwise defined, this function does not use them, so results might
+ not be up to date.
+*/
+size_t dlmalloc_max_footprint(void);
+
+#if !NO_MALLINFO
+/*
+ mallinfo()
+ Returns (by copy) a struct containing various summary statistics:
+
+ arena: current total non-mmapped bytes allocated from system
+ ordblks: the number of free chunks
+ smblks: always zero.
+ hblks: current number of mmapped regions
+ hblkhd: total bytes held in mmapped regions
+ usmblks: the maximum total allocated space. This will be greater
+ than current total if trimming has occurred.
+ fsmblks: always zero
+ uordblks: current total allocated space (normal or mmapped)
+ fordblks: total free space
+ keepcost: the maximum number of bytes that could ideally be released
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
+
+ Because these fields are ints, but internal bookkeeping may
+ be kept as longs, the reported values may wrap around zero and
+ thus be inaccurate.
+*/
+struct mallinfo dlmallinfo(void);
+#endif /* NO_MALLINFO */
+
+/*
+ independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+ independent_calloc is similar to calloc, but instead of returning a
+ single cleared space, it returns an array of pointers to n_elements
+ independent elements that can hold contents of size elem_size, each
+ of which starts out cleared, and can be independently freed,
+ realloc'ed etc. The elements are guaranteed to be adjacently
+ allocated (this is not guaranteed to occur with multiple callocs or
+ mallocs), which may also improve cache locality in some
+ applications.
+
+ The "chunks" argument is optional (i.e., may be null, which is
+ probably the most typical usage). If it is null, the returned array
+ is itself dynamically allocated and should also be freed when it is
+ no longer needed. Otherwise, the chunks array must be of at least
+ n_elements in length. It is filled in with the pointers to the
+ chunks.
+
+ In either case, independent_calloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and "chunks"
+ is null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use regular calloc and assign pointers into this
+ space to represent elements. (In this case though, you cannot
+ independently free elements.)
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+ pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+ for (i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use a single regular malloc, and assign pointers at
+ particular offsets in the aggregate space. (In this case though, you
+ cannot independently free elements.)
+
+ independent_comalloc differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+ struct Head { ... }
+ struct Foot { ... }
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+
+ Overuse of independent_comalloc can increase overall memory usage,
+ since it cannot reuse existing noncontiguous small chunks that
+ might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+
+/*
+ pvalloc(size_t n);
+ Equivalent to valloc(minimum-page-that-holds(n)), that is,
+ round up n to nearest pagesize.
+ */
+void* dlpvalloc(size_t);
+
+/*
+ malloc_trim(size_t pad);
+
+ If possible, gives memory back to the system (via negative arguments
+ to sbrk) if there is unused memory at the `high' end of the malloc
+ pool or in unused MMAP segments. You can call this after freeing
+ large blocks of memory to potentially reduce the system-level memory
+ requirements of a program. However, it cannot guarantee to reduce
+ memory. Under some allocation patterns, some large free blocks of
+ memory will be locked between two used chunks, so they cannot be
+ given back to the system.
+
+ The `pad' argument to malloc_trim represents the amount of free
+ trailing space to leave untrimmed. If this argument is zero, only
+ the minimum amount of memory to maintain internal data structures
+ will be left. Non-zero arguments can be supplied to maintain enough
+ trailing space to service future expected allocations without having
+ to re-obtain memory from the system.
+
+ Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+int dlmalloc_trim(size_t);
+
+/*
+ malloc_usable_size(void* p);
+
+ Returns the number of bytes you can actually use in
+ an allocated chunk, which may be more than you requested (although
+ often not) due to alignment and minimum size constraints.
+ You can use this many bytes without worrying about
+ overwriting other allocated objects. This is not a particularly great
+ programming practice. malloc_usable_size can be more useful in
+ debugging and assertions, for example:
+
+ p = malloc(n);
+ assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+/*
+ malloc_stats();
+ Prints on stderr the amount of space obtained from the system (both
+ via sbrk and mmap), the maximum amount (which may be more than
+ current if malloc_trim and/or munmap got called), and the current
+ number of bytes allocated via malloc (or realloc, etc) but not yet
+ freed. Note that this is the number of bytes allocated, not the
+ number requested. It will be larger than the number requested
+ because of alignment and bookkeeping overhead. Because it includes
+ alignment wastage as being in use, this figure may be greater than
+ zero even when no user-level chunks are allocated.
+
+ The reported current and maximum system memory can be inaccurate if
+ a program makes other calls to system memory allocation functions
+ (normally sbrk) outside of malloc.
+
+ malloc_stats prints only the most commonly interesting statistics.
+ More information can be obtained by calling mallinfo.
+*/
+void dlmalloc_stats(void);
+
+#endif /* ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+ mspace is an opaque type representing an independent
+ region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+ create_mspace creates and returns a new independent space with the
+ given initial capacity, or, if 0, the default granularity size. It
+ returns null if there is no system memory available to create the
+ space. If argument locked is non-zero, the space uses a separate
+ lock to control access. The capacity of the space will grow
+ dynamically as needed to service mspace_malloc requests. You can
+ control the sizes of incremental increases of this space by
+ compiling with a different DEFAULT_GRANULARITY or dynamically
+ setting with mallopt(M_GRANULARITY, value).
+*/
+mspace create_mspace(size_t capacity, int locked);
+
+/*
+ destroy_mspace destroys the given space, and attempts to return all
+ of its memory back to the system, returning the total number of
+ bytes freed. After destruction, the results of access to all memory
+ used by the space become undefined.
+*/
+size_t destroy_mspace(mspace msp);
+
+/*
+ create_mspace_with_base uses the memory supplied as the initial base
+ of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+ space is used for bookkeeping, so the capacity must be at least this
+ large. (Otherwise 0 is returned.) When this initial space is
+ exhausted, additional memory will be obtained from the system.
+ Destroying this space will deallocate all additionally allocated
+ space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+ mspace_malloc behaves as malloc, but operates within
+ the given space.
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+ mspace_free behaves as free, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_free is not actually needed.
+ free may be called instead of mspace_free because freed chunks from
+ any space are handled by their originating spaces.
+*/
+void mspace_free(mspace msp, void* mem);
+
+/*
+ mspace_realloc behaves as realloc, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_realloc is not actually
+ needed. realloc may be called instead of mspace_realloc because
+ realloced chunks from any space are handled by their originating
+ spaces.
+*/
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+ mspace_calloc behaves as calloc, but operates within
+ the given space.
+*/
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+ mspace_memalign behaves as memalign, but operates within
+ the given space.
+*/
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+ mspace_independent_calloc behaves as independent_calloc, but
+ operates within the given space.
+*/
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]);
+
+/*
+ mspace_independent_comalloc behaves as independent_comalloc, but
+ operates within the given space.
+*/
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]);
+
+/*
+ mspace_footprint() returns the number of bytes obtained from the
+ system for this space.
+*/
+size_t mspace_footprint(mspace msp);
+
+/*
+ mspace_max_footprint() returns the peak number of bytes obtained from the
+ system for this space.
+*/
+size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+ mspace_mallinfo behaves as mallinfo, but reports properties of
+ the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+ mspace_malloc_stats behaves as malloc_stats, but reports
+ properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+ mspace_trim behaves as malloc_trim, but
+ operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+ An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+}; /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+ ========================================================================
+ To make a fully customizable malloc.h header file, cut everything
+ above this line, put into file malloc.h, edit to suit, and #include it
+ on the next line, as well as in programs that use this malloc.
+ ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef WIN32
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* WIN32 */
+
+#ifndef LACKS_STDIO_H
+#include <stdio.h> /* for printing in malloc_stats */
+#endif
+
+#ifndef LACKS_ERRNO_H
+#include <errno.h> /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#if FOOTERS
+#include <time.h> /* for magic initialization */
+#endif /* FOOTERS */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h> /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else /* DEBUG */
+#define assert(x)
+#endif /* DEBUG */
+#ifndef LACKS_STRING_H
+#include <string.h> /* for memset etc */
+#endif /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h> /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h> /* for mmap */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#if HAVE_MORECORE
+#ifndef LACKS_UNISTD_H
+#include <unistd.h> /* for sbrk */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void* sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+#endif /* HAVE_MORECORE */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+# ifndef _SC_PAGE_SIZE
+# define _SC_PAGE_SIZE _SC_PAGESIZE
+# endif
+# endif
+# ifdef _SC_PAGE_SIZE
+# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+# else
+# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+ extern size_t getpagesize();
+# define malloc_getpagesize getpagesize()
+# else
+# ifdef WIN32 /* use supplied emulation of getpagesize */
+# define malloc_getpagesize getpagesize()
+# else
+# ifndef LACKS_SYS_PARAM_H
+# include <sys/param.h>
+# endif
+# ifdef EXEC_PAGESIZE
+# define malloc_getpagesize EXEC_PAGESIZE
+# else
+# ifdef NBPG
+# ifndef CLSIZE
+# define malloc_getpagesize NBPG
+# else
+# define malloc_getpagesize (NBPG * CLSIZE)
+# endif
+# else
+# ifdef NBPC
+# define malloc_getpagesize NBPC
+# else
+# ifdef PAGESIZE
+# define malloc_getpagesize PAGESIZE
+# else /* just guess */
+# define malloc_getpagesize ((size_t)4096U)
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+#endif
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+ If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+ checks to fail so compiler optimizer can delete code rather than
+ using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL ((void*)(MAX_SIZE_T))
+#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
+
+#if !HAVE_MMAP
+#define IS_MMAPPED_BIT (SIZE_T_ZERO)
+#define USE_MMAP_BIT (SIZE_T_ZERO)
+#define CALL_MMAP(s) MFAIL
+#define CALL_MUNMAP(a, s) (-1)
+#define DIRECT_MMAP(s) MFAIL
+
+#else /* HAVE_MMAP */
+#define IS_MMAPPED_BIT (SIZE_T_ONE)
+#define USE_MMAP_BIT (SIZE_T_ONE)
+
+#ifndef WIN32
+#define CALL_MUNMAP(a, s) munmap((a), (s))
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
+#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#else /* MAP_ANONYMOUS */
+/*
+ Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+ is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#endif /* MAP_ANONYMOUS */
+
+#define DIRECT_MMAP(s) CALL_MMAP(s)
+#else /* WIN32 */
+
+/* Win32 MMAP via VirtualAlloc */
+/* Reserve and commit size bytes of read/write pages via VirtualAlloc.
+   Returns the base address of the mapping, or MFAIL on failure. */
+static void* win32mmap(size_t size) {
+  void* mem = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  if (mem == 0)
+    return MFAIL;
+  return mem;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+/* As win32mmap, but additionally passes MEM_TOP_DOWN so direct-mapped
+   chunks are placed at high addresses.  Returns MFAIL on failure. */
+static void* win32direct_mmap(size_t size) {
+  void* mem = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+                           PAGE_READWRITE);
+  if (mem == 0)
+    return MFAIL;
+  return mem;
+}
+
+/* This function supports releasing coalesced segments */
+static int win32munmap(void* ptr, size_t size) {
+ MEMORY_BASIC_INFORMATION minfo;
+ char* cptr = ptr;
+ /* Walk [ptr, ptr+size) one VirtualAlloc region at a time, releasing
+    each region in turn.  Returns 0 on success, -1 on failure; note that
+    regions released before a failing one remain released. */
+ while (size) {
+ if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+ return -1;
+ /* Only release when cptr is exactly the base of a committed
+    allocation and the region does not extend past the requested
+    range; anything else means the range was not built from our
+    own mappings. */
+ if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+ minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+ return -1;
+ if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+ return -1;
+ /* Advance past the region just released. */
+ cptr += minfo.RegionSize;
+ size -= minfo.RegionSize;
+ }
+ return 0;
+}
+
+#define CALL_MMAP(s) win32mmap(s)
+#define CALL_MUNMAP(a, s) win32munmap((a), (s))
+#define DIRECT_MMAP(s) win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MMAP && HAVE_MREMAP
+#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#else /* HAVE_MMAP && HAVE_MREMAP */
+#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+#if HAVE_MORECORE
+#define CALL_MORECORE(S) MORECORE(S)
+#else /* HAVE_MORECORE */
+#define CALL_MORECORE(S) MFAIL
+#endif /* HAVE_MORECORE */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+#if USE_LOCKS
+
+/*
+ When locks are defined, there are up to two global locks:
+
+ * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
+ MORECORE. In many cases sys_alloc requires two calls, that should
+ not be interleaved with calls by other threads. This does not
+ protect against direct calls to MORECORE by other threads not
+ using this lock, so there is still code to cope the best we can on
+ interference.
+
+ * magic_init_mutex ensures that mparams.magic and other
+ unique mparams values are initialized only once.
+*/
+
+#ifndef WIN32
+/* By default use posix locks */
+#include <pthread.h>
+#define MLOCK_T pthread_mutex_t
+#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
+#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+#define RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif /* HAVE_MORECORE */
+
+static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#else /* WIN32 */
+/*
+ Because lock-protected regions have bounded times, and there
+ are no recursive lock calls, we can use simple spinlocks.
+*/
+
+#define MLOCK_T long
+/* Spin until *sl can be atomically changed from 0 to 1, i.e. until the
+   lock is acquired.  Always returns 0.  Sleep(0) yields the remainder
+   of the time slice between attempts so the holder can run and release
+   the lock. */
+static int win32_acquire_lock (MLOCK_T *sl) {
+ for (;;) {
+#ifdef InterlockedCompareExchangePointer
+ /* InterlockedCompareExchange returns the previous value of *sl;
+    zero means the exchange succeeded and the lock is now held. */
+ if (!InterlockedCompareExchange(sl, 1, 0))
+ return 0;
+#else /* Use older void* version */
+ if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
+ return 0;
+#endif /* InterlockedCompareExchangePointer */
+ Sleep (0);
+ }
+}
+
+/* Release a spinlock taken by win32_acquire_lock by atomically storing
+   0 into the lock word. */
+static void win32_release_lock (MLOCK_T *sl) {
+ InterlockedExchange (sl, 0);
+}
+
+#define INITIAL_LOCK(l) *(l)=0
+#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
+#define RELEASE_LOCK(l) win32_release_lock(l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+#endif /* WIN32 */
+
+#define USE_LOCK_BIT (2U)
+#else /* USE_LOCKS */
+#define USE_LOCK_BIT (0U)
+#define INITIAL_LOCK(l)
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS && HAVE_MORECORE
+#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
+#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
+#else /* USE_LOCKS && HAVE_MORECORE */
+#define ACQUIRE_MORECORE_LOCK()
+#define RELEASE_MORECORE_LOCK()
+#endif /* USE_LOCKS && HAVE_MORECORE */
+
+#if USE_LOCKS
+#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
+#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
+#else /* USE_LOCKS */
+#define ACQUIRE_MAGIC_INIT_LOCK()
+#define RELEASE_MAGIC_INIT_LOCK()
+#endif /* USE_LOCKS */
+
+
+/* ----------------------- Chunk representations ------------------------ */
+
+/*
+ (The following includes lightly edited explanations by Colin Plumb.)
+
+ The malloc_chunk declaration below is misleading (but accurate and
+ necessary). It declares a "view" into memory allowing access to
+ necessary fields at known offsets from a given base.
+
+ Chunks of memory are maintained using a `boundary tag' method as
+ originally described by Knuth. (See the paper by Paul Wilson
+ ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
+ techniques.) Sizes of free chunks are stored both in the front of
+ each chunk and at the end. This makes consolidating fragmented
+ chunks into bigger chunks fast. The head fields also hold bits
+ representing whether chunks are free or in use.
+
+ Here are some pictures to make it clearer. They are "exploded" to
+ show that the state of a chunk can be thought of as extending from
+ the high 31 bits of the head field of its header through the
+ prev_foot and PINUSE_BIT bit of the following chunk header.
+
+ A chunk that's in use looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk (if P = 1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+ | Size of this chunk 1| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ +- -+
+ | |
+ +- -+
+ | :
+ +- size - sizeof(size_t) available payload bytes -+
+ : |
+ chunk-> +- -+
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
+ | Size of next chunk (may or may not be in use) | +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ And if it's free, it looks like this:
+
+ chunk-> +- -+
+ | User payload (must be in use, or we would have merged!) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+ | Size of this chunk 0| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Next pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Prev pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | :
+ +- size - sizeof(struct chunk) unused bytes -+
+ : |
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of this chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
+ | Size of next chunk (must be in use, or we would have merged)| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | :
+ +- User payload -+
+ : |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |0|
+ +-+
+ Note that since we always merge adjacent free chunks, the chunks
+ adjacent to a free chunk must be in use.
+
+ Given a pointer to a chunk (which can be derived trivially from the
+ payload pointer) we can, in O(1) time, find out whether the adjacent
+ chunks are free, and if so, unlink them from the lists that they
+ are on and merge them with the current chunk.
+
+ Chunks always begin on even word boundaries, so the mem portion
+ (which is returned to the user) is also on an even word boundary, and
+ thus at least double-word aligned.
+
+ The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+ chunk size (which is always a multiple of two words), is an in-use
+ bit for the *previous* chunk. If that bit is *clear*, then the
+ word before the current chunk size contains the previous chunk
+ size, and can be used to find the front of the previous chunk.
+ The very first chunk allocated always has this bit set, preventing
+ access to non-existent (or non-owned) memory. If pinuse is set for
+ any given chunk, then you CANNOT determine the size of the
+ previous chunk, and might even get a memory addressing fault when
+ trying to do so.
+
+ The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+ the chunk size redundantly records whether the current chunk is
+ inuse. This redundancy enables usage checks within free and realloc,
+ and reduces indirection when freeing and consolidating chunks.
+
+ Each freshly allocated chunk must have both cinuse and pinuse set.
+ That is, each allocated chunk borders either a previously allocated
+ and still in-use chunk, or the base of its memory arena. This is
+ ensured by making all allocations from the `lowest' part of any
+ found chunk. Further, no free chunk physically borders another one,
+ so each free chunk is known to be preceded and followed by either
+ inuse chunks or the ends of memory.
+
+ Note that the `foot' of the current chunk is actually represented
+ as the prev_foot of the NEXT chunk. This makes it easier to
+ deal with alignments etc but can be very confusing when trying
+ to extend or adapt this code.
+
+ The exceptions to all this are
+
+ 1. The special chunk `top' is the top-most available chunk (i.e.,
+ the one bordering the end of available memory). It is treated
+ specially. Top is never included in any bin, is used only if
+ no other chunk is available, and is released back to the
+ system if it is very large (see M_TRIM_THRESHOLD). In effect,
+ the top chunk is treated as larger (and thus less well
+ fitting) than any other available chunk. The top chunk
+ doesn't update its trailing size field since there is no next
+ contiguous chunk that would have to index off it. However,
+ space is still allocated for it (TOP_FOOT_SIZE) to enable
+ separation or merging when space is extended.
+
+ 2. Chunks allocated via mmap, which have the lowest-order bit
+ (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
+ PINUSE_BIT in their head fields. Because they are allocated
+ one-by-one, each must carry its own prev_foot field, which is
+ also used to hold the offset this chunk has within its mmapped
+ region, which is needed to preserve alignment. Each mmapped
+ chunk is trailed by the first two fields of a fake next-chunk
+ for sake of usage checks.
+
+*/
+
+/* The chunk "view" described in the commentary above.  prev_foot and
+   head are always present; the fd/bk links occupy the start of the
+   user payload area and are meaningful only while the chunk is free. */
+struct malloc_chunk {
+ size_t prev_foot; /* Size of previous chunk (if free). */
+ size_t head; /* Size and inuse bits. */
+ struct malloc_chunk* fd; /* double links -- used only if free. */
+ struct malloc_chunk* bk; /* backward link of the free-list pair. */
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
+typedef unsigned int bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+ The head field of a chunk is or'ed with PINUSE_BIT when previous
+ adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
+ use. If the chunk was obtained with mmap, the prev_foot field has
+ IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+ mmapped region to the base of the chunk.
+*/
+
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from head words */
+#define cinuse(p) ((p)->head & CINUSE_BIT)
+#define pinuse(p) ((p)->head & PINUSE_BIT)
+#define chunksize(p) ((p)->head & ~(INUSE_BITS))
+
+#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
+#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
+
+/* Treat space at ptr +/- offset as a chunk */
+#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
+
+/* extract next chunk's pinuse bit */
+#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
+#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
+
+/* Set size, pinuse bit, and foot */
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+ ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
+
+/* Set size, pinuse bit, foot, and clear next pinuse */
+#define set_free_with_pinuse(p, s, n)\
+ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
+
+#define is_mmapped(p)\
+ (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define overhead_for(p)\
+ (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* Return true if malloced space is not necessarily cleared */
+#if MMAP_CLEARS
+#define calloc_must_clear(p) (!is_mmapped(p))
+#else /* MMAP_CLEARS */
+#define calloc_must_clear(p) (1)
+#endif /* MMAP_CLEARS */
+
+/* ---------------------- Overlaid data structures ----------------------- */
+
+/*
+ When chunks are not in use, they are treated as nodes of either
+ lists or trees.
+
+ "Small" chunks are stored in circular doubly-linked lists, and look
+ like this:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space (may be 0 bytes long) .
+ . .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Larger chunks are kept in a form of bitwise digital trees (aka
+ tries) keyed on chunksizes. Because malloc_tree_chunks are only for
+ free chunks greater than 256 bytes, their size doesn't impose any
+ constraints on user chunk sizes. Each node looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk of same size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk of same size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to left child (child[0]) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to right child (child[1]) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to parent |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | bin index of this chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Each tree holding treenodes is a tree of unique chunk sizes. Chunks
+ of the same size are arranged in a circularly-linked list, with only
+ the oldest chunk (the next to be used, in our FIFO ordering)
+ actually in the tree. (Tree members are distinguished by a non-null
+ parent pointer.) If a chunk with the same size as an existing node
+ is inserted, it is linked off the existing node using pointers that
+ work in the same way as fd/bk pointers of small chunks.
+
+ Each tree contains a power of 2 sized range of chunk sizes (the
+ smallest is 0x100 <= x < 0x180), which is divided in half at each
+ tree level, with the chunks in the smaller half of the range (0x100
+ <= x < 0x140 for the top node) in the left subtree and the larger
+ half (0x140 <= x < 0x180) in the right subtree. This is, of course,
+ done by inspecting individual bits.
+
+ Using these rules, each node's left subtree contains all smaller
+ sizes than its right subtree. However, the node at the root of each
+ subtree has no particular ordering relationship to either. (The
+ dividing line between the subtree sizes is based on trie relation.)
+ If we remove the last chunk of a given size from the interior of the
+ tree, we need to replace it with a leaf node. The tree ordering
+ rules permit a node to be replaced by any leaf below it.
+
+ The smallest chunk in a tree (a common operation in a best-fit
+ allocator) can be found by walking a path to the leftmost leaf in
+ the tree. Unlike a usual binary tree, where we follow left child
+ pointers until we reach a null, here we follow the right child
+ pointer any time the left one is null, until we reach a leaf with
+ both child pointers null. The smallest chunk in the tree will be
+ somewhere along that path.
+
+ The worst case number of steps to add, find, or remove a node is
+ bounded by the number of bits differentiating chunks within
+ bins. Under current bin calculations, this ranges from 6 up to 21
+ (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
+ is of course much better.
+*/
+
+/* Tree node for chunks >= MIN_LARGE_SIZE; overlays a malloc_chunk.  */
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t prev_foot;
+ size_t head;
+ struct malloc_tree_chunk* fd;
+ struct malloc_tree_chunk* bk;
+
+ struct malloc_tree_chunk* child[2]; /* left/right subtrees */
+ struct malloc_tree_chunk* parent; /* non-null iff actually in tree */
+ bindex_t index; /* treebin index holding this chunk */
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
+
+/* ----------------------------- Segments -------------------------------- */
+
+/*
+ Each malloc space may include non-contiguous segments, held in a
+ list headed by an embedded malloc_segment record representing the
+ top-most space. Segments also include flags holding properties of
+ the space. Large chunks that are directly allocated by mmap are not
+ included in this list. They are instead independently created and
+ destroyed without otherwise keeping track of them.
+
+ Segment management mainly comes into play for spaces allocated by
+ MMAP. Any call to MMAP might or might not return memory that is
+ adjacent to an existing segment. MORECORE normally contiguously
+ extends the current space, so this space is almost always adjacent,
+ which is simpler and faster to deal with. (This is why MORECORE is
+ used preferentially to MMAP when both are available -- see
+ sys_alloc.) When allocating using MMAP, we don't use any of the
+ hinting mechanisms (inconsistently) supported in various
+ implementations of unix mmap, or distinguish reserving from
+ committing memory. Instead, we just ask for space, and exploit
+ contiguity when we get it. It is probably possible to do
+ better than this on some systems, but no general scheme seems
+ to be significantly better.
+
+ Management entails a simpler variant of the consolidation scheme
+ used for chunks to reduce fragmentation -- new adjacent memory is
+ normally prepended or appended to an existing segment. However,
+ there are limitations compared to chunk consolidation that mostly
+ reflect the fact that segment processing is relatively infrequent
+ (occurring only when getting memory from system) and that we
+ don't expect to have huge numbers of segments:
+
+ * Segments are not indexed, so traversal requires linear scans. (It
+ would be possible to index these, but is not worth the extra
+ overhead and complexity for most programs on most platforms.)
+ * New segments are only appended to old ones when holding top-most
+ memory; if they cannot be prepended to others, they are held in
+ different segments.
+
+ Except for the top-most segment of an mstate, each segment record
+ is kept at the tail of its segment. Segments are added by pushing
+ segment records onto the list headed by &mstate.seg for the
+ containing mstate.
+
+ Segment flags control allocation/merge/deallocation policies:
+ * If EXTERN_BIT set, then we did not allocate this segment,
+ and so should not try to deallocate or merge with others.
+ (This currently holds only for the initial segment passed
+ into create_mspace_with_base.)
+ * If IS_MMAPPED_BIT set, the segment may be merged with
+ other surrounding mmapped segments and trimmed/de-allocated
+ using munmap.
+ * If neither bit is set, then the segment was obtained using
+ MORECORE so can be merged with surrounding MORECORE'd segments
+ and deallocated/trimmed using MORECORE with negative arguments.
+*/
+
+/* Record describing one non-contiguous region of system memory.
+   Except for the top-most segment, each record is stored at the tail
+   of the segment it describes.  */
+struct malloc_segment {
+ char* base; /* base address */
+ size_t size; /* allocated size */
+ struct malloc_segment* next; /* ptr to next segment */
+ flag_t sflags; /* mmap and extern flag */
+};
+
+#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
+#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/*
+ A malloc_state holds all of the bookkeeping for a space.
+ The main fields are:
+
+ Top
+ The topmost chunk of the currently active segment. Its size is
+ cached in topsize. The actual size of topmost space is
+ topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+ fenceposts and segment records if necessary when getting more
+ space from the system. The size at which to autotrim top is
+ cached from mparams in trim_check, except that it is disabled if
+ an autotrim fails.
+
+ Designated victim (dv)
+ This is the preferred chunk for servicing small requests that
+ don't have exact fits. It is normally the chunk split off most
+ recently to service another small request. Its size is cached in
+ dvsize. The link fields of this chunk are not maintained since it
+ is not kept in a bin.
+
+ SmallBins
+ An array of bin headers for free chunks. These bins hold chunks
+ with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+ chunks of all the same size, spaced 8 bytes apart. To simplify
+ use in double-linked lists, each bin header acts as a malloc_chunk
+ pointing to the real first node, if it exists (else pointing to
+ itself). This avoids special-casing for headers. But to avoid
+ waste, we allocate only the fd/bk pointers of bins, and then use
+ repositioning tricks to treat these as the fields of a chunk.
+
+ TreeBins
+ Treebins are pointers to the roots of trees holding a range of
+ sizes. There are 2 equally spaced treebins for each power of two
+ from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
+ larger.
+
+ Bin maps
+ There is one bit map for small bins ("smallmap") and one for
+ treebins ("treemap"). Each bin sets its bit when non-empty, and
+ clears the bit when empty. Bit operations are then used to avoid
+ bin-by-bin searching -- nearly all "search" is done without ever
+ looking at bins that won't be selected. The bit maps
+ conservatively use 32 bits per map word, even if on 64bit system.
+ For a good description of some of the bit-based techniques used
+ here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+ supplement at http://hackersdelight.org/). Many of these are
+ intended to reduce the branchiness of paths through malloc etc, as
+ well as to reduce the number of memory locations read or written.
+
+ Segments
+ A list of segments headed by an embedded malloc_segment record
+ representing the initial space.
+
+ Address check support
+ The least_addr field is the least address ever obtained from
+ MORECORE or MMAP. Attempted frees and reallocs of any address less
+ than this are trapped (unless INSECURE is defined).
+
+ Magic tag
+ A cross-check field that should always hold same value as mparams.magic.
+
+ Flags
+ Bits recording whether to use MMAP, locks, or contiguous MORECORE
+
+ Statistics
+ Each space keeps track of current and maximum system memory
+ obtained via MORECORE or MMAP.
+
+ Locking
+ If USE_LOCKS is defined, the "mutex" lock is acquired and released
+ around every public call using this mspace.
+*/
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
+/* All bookkeeping for one malloc space; see the long comment above
+   for the role of each field.  */
+struct malloc_state {
+ binmap_t smallmap; /* bitmap of non-empty smallbins */
+ binmap_t treemap; /* bitmap of non-empty treebins */
+ size_t dvsize; /* cached size of dv chunk */
+ size_t topsize; /* cached size of top chunk */
+ char* least_addr; /* least address ever obtained from system */
+ mchunkptr dv; /* designated victim for small requests */
+ mchunkptr top; /* topmost chunk of active segment */
+ size_t trim_check; /* autotrim threshold cached from mparams */
+ size_t magic; /* must equal mparams.magic */
+ mchunkptr smallbins[(NSMALLBINS+1)*2]; /* only fd/bk of each bin header
+ are allocated; see SmallBins note above */
+ tbinptr treebins[NTREEBINS]; /* roots of size-range trees */
+ size_t footprint; /* current system memory obtained */
+ size_t max_footprint; /* peak system memory obtained */
+ flag_t mflags; /* USE_MMAP/USE_LOCK/... bits */
+#if USE_LOCKS
+ MLOCK_T mutex; /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+ msegment seg; /* embedded record for the initial segment */
+};
+
+typedef struct malloc_state* mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+ malloc_params holds global properties, including those that can be
+ dynamically set using mallopt. There is a single instance, mparams,
+ initialized in init_mparams.
+*/
+
+struct malloc_params {
+ size_t magic; /* random tag; each mstate's magic must match it */
+ size_t page_size; /* system page size; 0 until init_mparams runs */
+ size_t granularity; /* unit for system allocation requests */
+ size_t mmap_threshold; /* settable via mallopt(M_MMAP_THRESHOLD) */
+ size_t trim_threshold; /* settable via mallopt(M_TRIM_THRESHOLD) */
+ flag_t default_mflags; /* initial mflags, copied to gm in init_mparams */
+};
+
+static struct malloc_params mparams;
+
+/* The global malloc_state used for all non-"mspace" calls */
+static struct malloc_state _gm_;
+#define gm (&_gm_)
+#define is_global(M) ((M) == &_gm_)
+#define is_initialized(M) ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on mflags */
+
+#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
+#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
+#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
+
+#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
+#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
+#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
+
+#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
+
+#define set_lock(M,L)\
+ ((M)->mflags = (L)?\
+ ((M)->mflags | USE_LOCK_BIT) :\
+ ((M)->mflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+ (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
+
+#define is_page_aligned(S)\
+ (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
+#define is_granularity_aligned(S)\
+ (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
+
+/* True if segment S holds address A */
+#define segment_holds(S, A)\
+ ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
+
+/* Return the segment of M whose [base, base+size) range contains
+   ADDR, or 0 if none does.  Linear scan of the list headed by
+   &m->seg; segments are not indexed (see Segments comment above).  */
+static msegmentptr segment_holding(mstate m, char* addr) {
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if (addr >= sp->base && addr < sp->base + sp->size)
+ return sp;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+/* Return 1 if any segment record on M's list lies inside SS's
+   address range (i.e. SS holds a segment link), else 0.  */
+static int has_segment_link(mstate m, msegmentptr ss) {
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
+ return 1;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+#ifndef MORECORE_CANNOT_TRIM
+#define should_trim(M,s) ((s) > (M)->trim_check)
+#else /* MORECORE_CANNOT_TRIM */
+#define should_trim(M,s) (0)
+#endif /* MORECORE_CANNOT_TRIM */
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE\
+ (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+
+/* ------------------------------- Hooks -------------------------------- */
+
+/*
+ PREACTION should be defined to return 0 on success, and nonzero on
+ failure. If you are not using locking, you can redefine these to do
+ anything you like.
+*/
+
+#if USE_LOCKS
+
+/* Ensure locks are initialized */
+#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
+
+#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
+#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
+#else /* USE_LOCKS */
+
+#ifndef PREACTION
+#define PREACTION(M) (0)
+#endif /* PREACTION */
+
+#ifndef POSTACTION
+#define POSTACTION(M)
+#endif /* POSTACTION */
+
+#endif /* USE_LOCKS */
+
+/*
+ CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
+ USAGE_ERROR_ACTION is triggered on detected bad frees and
+ reallocs. The argument p is an address that might have triggered the
+ fault. It is ignored by the two predefined actions, but might be
+ useful in custom actions that try to help diagnose errors.
+*/
+
+#if PROCEED_ON_ERROR
+
+/* A count of the number of corruption errors causing resets */
+int malloc_corruption_error_count;
+
+/* default corruption action */
+static void reset_on_error(mstate m);
+
+#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
+#define USAGE_ERROR_ACTION(m, p)
+
+#else /* PROCEED_ON_ERROR */
+
+#ifndef CORRUPTION_ERROR_ACTION
+#define CORRUPTION_ERROR_ACTION(m) ABORT
+#endif /* CORRUPTION_ERROR_ACTION */
+
+#ifndef USAGE_ERROR_ACTION
+#define USAGE_ERROR_ACTION(m,p) ABORT
+#endif /* USAGE_ERROR_ACTION */
+
+#endif /* PROCEED_ON_ERROR */
+
+/* -------------------------- Debugging setup ---------------------------- */
+
+#if ! DEBUG
+
+#define check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)
+#define check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)
+#define check_malloc_state(M)
+#define check_top_chunk(M,P)
+
+#else /* DEBUG */
+#define check_free_chunk(M,P) do_check_free_chunk(M,P)
+#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
+#define check_top_chunk(M,P) do_check_top_chunk(M,P)
+#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
+#define check_malloc_state(M) do_check_malloc_state(M)
+
+static void do_check_any_chunk(mstate m, mchunkptr p);
+static void do_check_top_chunk(mstate m, mchunkptr p);
+static void do_check_mmapped_chunk(mstate m, mchunkptr p);
+static void do_check_inuse_chunk(mstate m, mchunkptr p);
+static void do_check_free_chunk(mstate m, mchunkptr p);
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
+static void do_check_tree(mstate m, tchunkptr t);
+static void do_check_treebin(mstate m, bindex_t i);
+static void do_check_smallbin(mstate m, bindex_t i);
+static void do_check_malloc_state(mstate m);
+static int bin_find(mstate m, mchunkptr x);
+static size_t traverse_and_check(mstate m);
+#endif /* DEBUG */
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s) ((s) >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
+
+/* addressing by index. See above about smallbin repositioning */
+#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i) (&((M)->treebins[i]))
+
+/* assign tree index for size S to variable I */
+#if defined(__GNUC__) && defined(i386)
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int K;\
+ __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
+#else /* GNUC */
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int Y = (unsigned int)X;\
+ unsigned int N = ((Y - 0x100) >> 16) & 8;\
+ unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
+ N += K;\
+ N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
+ K = 14 - N + ((Y <<= K) >> 15);\
+ I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
+ }\
+}
+#endif /* GNUC */
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define bit_for_tree_index(i) \
+ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+ ((i == NTREEBINS-1)? 0 : \
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i) ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
+#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
+
+#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
+#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
+#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
+
+/* index corresponding to given bit */
+
+#if defined(__GNUC__) && defined(i386)
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
+ I = (bindex_t)J;\
+}
+
+#else /* GNUC */
+#if USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else /* USE_BUILTIN_FFS */
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int Y = X - 1;\
+ unsigned int K = Y >> (16-4) & 16;\
+ unsigned int N = K; Y >>= K;\
+ N += K = Y >> (8-3) & 8; Y >>= K;\
+ N += K = Y >> (4-2) & 4; Y >>= K;\
+ N += K = Y >> (2-1) & 2; Y >>= K;\
+ N += K = Y >> (1-0) & 1; Y >>= K;\
+ I = (bindex_t)(N + Y);\
+}
+#endif /* USE_BUILTIN_FFS */
+#endif /* GNUC */
+
+/* isolate the least set bit of a bitmap */
+#define least_bit(x) ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x) ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define same_or_left_bits(x) ((x) | -(x))
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+ For security, the main invariant is that malloc/free/etc never
+ writes to a static address other than malloc_state, unless static
+ malloc_state itself has been corrupted, which cannot occur via
+ malloc (because of these checks). In essence this means that we
+ believe all pointers, sizes, maps etc held in malloc_state, but
+ check all of those linked or offsetted from other embedded data
+ structures. These checks are interspersed with main code in a way
+ that tends to minimize their run-time cost.
+
+ When FOOTERS is defined, in addition to range checking, we also
+ verify footer fields of inuse chunks, which can be used to guarantee
+ that the mstate controlling malloc/free is intact. This is a
+ streamlined version of the approach described by William Robertson
+ et al in "Run-time Detection of Heap-based Overflows" LISA'03
+ http://www.usenix.org/events/lisa03/tech/robertson.html The footer
+ of an inuse chunk holds the xor of its mstate and a random seed,
+ that is checked upon calls to free() and realloc(). This is
+ (probabilistically) unguessable from outside the program, but can be
+ computed by any code successfully malloc'ing any chunk, so does not
+ itself provide protection against code that has already broken
+ security through some other means. Unlike Robertson et al, we
+ always dynamically check addresses of all offset chunks (previous,
+ next, etc). This turns out to be cheaper than relying on hashes.
+*/
+
+#if !INSECURE
+/* Check if address a is at least as high as any from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n) ((char*)(p) < (char*)(n))
+/* Check if p has its cinuse bit on */
+#define ok_cinuse(p) cinuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p) pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n) (1)
+#define ok_cinuse(p) (1)
+#define ok_pinuse(p) (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M) ((M)->magic == mparams.magic)
+#else /* (FOOTERS && !INSECURE) */
+#define ok_magic(M) (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e) __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e) (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e) (1)
+#endif /* !INSECURE */
+
+/* macros to set up inuse chunks with or without footers */
+
+#if !FOOTERS
+
+#define mark_inuse_foot(M,p,s)
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+#else /* FOOTERS */
+
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+ (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
+
+#define get_mstate_for(p)\
+ ((mstate)(((mchunkptr)((char*)(p) +\
+ (chunksize(p))))->prev_foot ^ mparams.magic))
+
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+ mark_inuse_foot(M,p,s))
+
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+ mark_inuse_foot(M,p,s))
+
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ mark_inuse_foot(M, p, s))
+
+#endif /* !FOOTERS */
+
+/* ---------------------------- setting mparams -------------------------- */
+
+/* Initialize mparams.  Idempotent: does nothing once page_size is
+   set.  Establishes the mmap/trim thresholds and default flags, the
+   magic cross-check value (random when FOOTERS && !INSECURE), the
+   lock for the global malloc area, and the system page size and
+   granularity, then sanity-checks the compile-time configuration.
+   Always returns 0; callers use it for its side effects only.  */
+static int init_mparams(void) {
+ if (mparams.page_size == 0) {
+ size_t s;
+
+ mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+ mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
+#if MORECORE_CONTIGUOUS
+ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
+#else /* MORECORE_CONTIGUOUS */
+ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
+#endif /* MORECORE_CONTIGUOUS */
+
+#if (FOOTERS && !INSECURE)
+ {
+#if USE_DEV_RANDOM
+ int fd;
+ unsigned char buf[sizeof(size_t)];
+ /* Try to use /dev/urandom, else fall back on using time */
+ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
+ read(fd, buf, sizeof(buf)) == sizeof(buf)) {
+ s = *((size_t *) buf);
+ close(fd);
+ }
+ else
+#endif /* USE_DEV_RANDOM */
+ s = (size_t)(time(0) ^ (size_t)0x55555555U);
+
+ s |= (size_t)8U; /* ensure nonzero */
+ s &= ~(size_t)7U; /* improve chances of fault for bad values */
+
+ }
+#else /* (FOOTERS && !INSECURE) */
+ s = (size_t)0x58585858U;
+#endif /* (FOOTERS && !INSECURE) */
+ /* Only the first initializer to win the lock publishes magic and
+ sets up the global state's lock and flags.  */
+ ACQUIRE_MAGIC_INIT_LOCK();
+ if (mparams.magic == 0) {
+ mparams.magic = s;
+ /* Set up lock for main malloc area */
+ INITIAL_LOCK(&gm->mutex);
+ gm->mflags = mparams.default_mflags;
+ }
+ RELEASE_MAGIC_INIT_LOCK();
+
+#ifndef WIN32
+ mparams.page_size = malloc_getpagesize;
+ mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
+ DEFAULT_GRANULARITY : mparams.page_size);
+#else /* WIN32 */
+ {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ mparams.page_size = system_info.dwPageSize;
+ mparams.granularity = system_info.dwAllocationGranularity;
+ }
+#endif /* WIN32 */
+
+ /* Sanity-check configuration:
+ size_t must be unsigned and as wide as pointer type.
+ ints must be at least 4 bytes.
+ alignment must be at least 8.
+ Alignment, min chunk size, and page size must all be powers of 2.
+ */
+ if ((sizeof(size_t) != sizeof(char*)) ||
+ (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
+ (sizeof(int) < 4) ||
+ (MALLOC_ALIGNMENT < (size_t)8U) ||
+ ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
+ ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
+ ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
+ ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
+ ABORT;
+ }
+ return 0;
+}
+
+/* Support for mallopt: set parameter PARAM_NUMBER to VALUE.
+   Returns 1 on success, 0 for an unknown parameter or an invalid
+   M_GRANULARITY value (must be a power of two >= page_size).
+   Ensures mparams is initialized before applying the change.  */
+static int change_mparam(int param_number, int value) {
+ size_t val = (size_t)value;
+ init_mparams();
+ switch(param_number) {
+ case M_TRIM_THRESHOLD:
+ mparams.trim_threshold = val;
+ return 1;
+ case M_GRANULARITY:
+ if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
+ mparams.granularity = val;
+ return 1;
+ }
+ else
+ return 0;
+ case M_MMAP_THRESHOLD:
+ mparams.mmap_threshold = val;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#if DEBUG
+/* ------------------------- Debugging Support --------------------------- */
+
+/* Check properties of any chunk, whether free, inuse, mmapped etc:
+   its mem address is aligned (or it is a fencepost) and it is not
+   below the least address obtained from the system.  */
+static void do_check_any_chunk(mstate m, mchunkptr p) {
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+}
+
+/* Check properties of top chunk */
+static void do_check_top_chunk(mstate m, mchunkptr p) {
+ msegmentptr sp = segment_holding(m, (char*)p);
+ size_t sz = chunksize(p);
+ /* top must lie inside a segment this mstate knows about */
+ assert(sp != 0);
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+ assert(sz == m->topsize);
+ assert(sz > 0);
+ /* top reaches to the segment end minus the reserved foot overhead */
+ assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
+ /* previous chunk is in use, and top is never followed by a pinuse chunk */
+ assert(pinuse(p));
+ assert(!next_pinuse(p));
+}
+
+/* Check properties of (inuse) mmapped chunks */
+static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
+ size_t sz = chunksize(p);
+ /* full mapped length: chunk + alignment offset stored in prev_foot + pad */
+ size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
+ assert(is_mmapped(p));
+ assert(use_mmap(m));
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+ assert(!is_small(sz));
+ /* mapped regions are whole pages */
+ assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
+ /* region ends with a fencepost followed by a zero-headed fake chunk */
+ assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
+ assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
+}
+
+/* Check properties of inuse chunks */
+static void do_check_inuse_chunk(mstate m, mchunkptr p) {
+ do_check_any_chunk(m, p);
+ assert(cinuse(p));
+ /* successor must see this chunk as in use */
+ assert(next_pinuse(p));
+ /* If not pinuse and not mmapped, previous chunk has OK offset */
+ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
+ if (is_mmapped(p))
+ do_check_mmapped_chunk(m, p);
+}
+
+/* Check properties of free chunks */
+static void do_check_free_chunk(mstate m, mchunkptr p) {
+ size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
+ mchunkptr next = chunk_plus_offset(p, sz);
+ do_check_any_chunk(m, p);
+ assert(!cinuse(p));
+ assert(!next_pinuse(p));
+ assert (!is_mmapped(p));
+ /* dv and top are free but deliberately kept out of any bin */
+ if (p != m->dv && p != m->top) {
+ if (sz >= MIN_CHUNK_SIZE) {
+ assert((sz & CHUNK_ALIGN_MASK) == 0);
+ assert(is_aligned(chunk2mem(p)));
+ /* successor's prev_foot mirrors our size (boundary-tag invariant) */
+ assert(next->prev_foot == sz);
+ assert(pinuse(p));
+ assert (next == m->top || cinuse(next));
+ /* doubly-linked bin list is consistent in both directions */
+ assert(p->fd->bk == p);
+ assert(p->bk->fd == p);
+ }
+ else /* markers are always of size SIZE_T_SIZE */
+ assert(sz == SIZE_T_SIZE);
+ }
+}
+
+/* Check properties of malloced chunks at the point they are malloced */
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
+ do_check_inuse_chunk(m, p);
+ assert((sz & CHUNK_ALIGN_MASK) == 0);
+ assert(sz >= MIN_CHUNK_SIZE);
+ /* granted size must cover the request s */
+ assert(sz >= s);
+ /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
+ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
+ }
+}
+
+/* Check a tree and its subtrees. */
+static void do_check_tree(mstate m, tchunkptr t) {
+ tchunkptr head = 0;
+ tchunkptr u = t;
+ bindex_t tindex = t->index;
+ size_t tsize = chunksize(t);
+ bindex_t idx;
+ compute_tree_index(tsize, idx);
+ /* node must be filed under the bin index matching its size */
+ assert(tindex == idx);
+ assert(tsize >= MIN_LARGE_SIZE);
+ assert(tsize >= minsize_for_tree_index(idx));
+ assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
+
+ do { /* traverse through chain of same-sized nodes */
+ do_check_any_chunk(m, ((mchunkptr)u));
+ assert(u->index == tindex);
+ assert(chunksize(u) == tsize);
+ assert(!cinuse(u));
+ assert(!next_pinuse(u));
+ /* fd/bk ring of equal-sized nodes is consistent both ways */
+ assert(u->fd->bk == u);
+ assert(u->bk->fd == u);
+ if (u->parent == 0) {
+ /* chained (non-head) nodes carry no tree links */
+ assert(u->child[0] == 0);
+ assert(u->child[1] == 0);
+ }
+ else {
+ assert(head == 0); /* only one node on chain has parent */
+ head = u;
+ assert(u->parent != u);
+ /* parent points back at us as a child, or as the bin root */
+ assert (u->parent->child[0] == u ||
+ u->parent->child[1] == u ||
+ *((tbinptr*)(u->parent)) == u);
+ if (u->child[0] != 0) {
+ assert(u->child[0]->parent == u);
+ assert(u->child[0] != u);
+ do_check_tree(m, u->child[0]);
+ }
+ if (u->child[1] != 0) {
+ assert(u->child[1]->parent == u);
+ assert(u->child[1] != u);
+ do_check_tree(m, u->child[1]);
+ }
+ /* left subtree holds strictly smaller sizes than right */
+ if (u->child[0] != 0 && u->child[1] != 0) {
+ assert(chunksize(u->child[0]) < chunksize(u->child[1]));
+ }
+ }
+ u = u->fd;
+ } while (u != t);
+ assert(head != 0);
+}
+
+/* Verify one treebin: its emptiness must agree with the treemap bit,
+ and a non-empty bin must hold a well-formed tree. */
+static void do_check_treebin(mstate m, bindex_t i) {
+ tbinptr* slot = treebin_at(m, i);
+ tchunkptr root = *slot;
+ int bin_empty = (m->treemap & (1U << i)) == 0;
+ if (root == 0)
+ assert(bin_empty);
+ if (!bin_empty)
+ do_check_tree(m, root);
+}
+
+/* Check all the chunks in a smallbin. */
+static void do_check_smallbin(mstate m, bindex_t i) {
+ sbinptr b = smallbin_at(m, i);
+ mchunkptr p = b->bk;
+ /* emptiness of the bin list must agree with the smallmap bit */
+ unsigned int empty = (m->smallmap & (1U << i)) == 0;
+ if (p == b)
+ assert(empty);
+ if (!empty) {
+ for (; p != b; p = p->bk) {
+ size_t size = chunksize(p);
+ mchunkptr q;
+ /* each chunk claims to be free */
+ do_check_free_chunk(m, p);
+ /* chunk belongs in bin */
+ assert(small_index(size) == i);
+ assert(p->bk == b || chunksize(p->bk) == chunksize(p));
+ /* chunk is followed by an inuse chunk */
+ q = next_chunk(p);
+ if (q->head != FENCEPOST_HEAD)
+ do_check_inuse_chunk(m, q);
+ }
+ }
+}
+
+/* Find x in a bin. Used in other check functions. */
+static int bin_find(mstate m, mchunkptr x) {
+ size_t size = chunksize(x);
+ if (is_small(size)) {
+ /* small: walk the circular list of the one bin this size maps to */
+ bindex_t sidx = small_index(size);
+ sbinptr b = smallbin_at(m, sidx);
+ if (smallmap_is_marked(m, sidx)) {
+ mchunkptr p = b;
+ do {
+ if (p == x)
+ return 1;
+ } while ((p = p->fd) != b);
+ }
+ }
+ else {
+ /* large: descend the treebin by size bits, then scan the
+ same-size chain at the matching node */
+ bindex_t tidx;
+ compute_tree_index(size, tidx);
+ if (treemap_is_marked(m, tidx)) {
+ tchunkptr t = *treebin_at(m, tidx);
+ size_t sizebits = size << leftshift_for_tree_index(tidx);
+ while (t != 0 && chunksize(t) != size) {
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ sizebits <<= 1;
+ }
+ if (t != 0) {
+ tchunkptr u = t;
+ do {
+ if (u == (tchunkptr)x)
+ return 1;
+ } while ((u = u->fd) != t);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Traverse each chunk and check it; return total */
+static size_t traverse_and_check(mstate m) {
+ size_t sum = 0;
+ if (is_initialized(m)) {
+ msegmentptr s = &m->seg;
+ /* account for top up front; the walk below stops at m->top */
+ sum += m->topsize + TOP_FOOT_SIZE;
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ mchunkptr lastq = 0;
+ assert(pinuse(q));
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ sum += chunksize(q);
+ if (cinuse(q)) {
+ /* inuse chunks must never appear in a bin */
+ assert(!bin_find(m, q));
+ do_check_inuse_chunk(m, q);
+ }
+ else {
+ /* every free chunk is either dv or binned */
+ assert(q == m->dv || bin_find(m, q));
+ assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
+ do_check_free_chunk(m, q);
+ }
+ lastq = q;
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+ }
+ return sum;
+}
+
+/* Check all properties of malloc_state. */
+static void do_check_malloc_state(mstate m) {
+ bindex_t i;
+ size_t total;
+ /* check bins */
+ for (i = 0; i < NSMALLBINS; ++i)
+ do_check_smallbin(m, i);
+ for (i = 0; i < NTREEBINS; ++i)
+ do_check_treebin(m, i);
+
+ if (m->dvsize != 0) { /* check dv chunk */
+ do_check_any_chunk(m, m->dv);
+ assert(m->dvsize == chunksize(m->dv));
+ assert(m->dvsize >= MIN_CHUNK_SIZE);
+ /* dv is kept out of all bins */
+ assert(bin_find(m, m->dv) == 0);
+ }
+
+ if (m->top != 0) { /* check top chunk */
+ do_check_top_chunk(m, m->top);
+ assert(m->topsize == chunksize(m->top));
+ assert(m->topsize > 0);
+ /* top is kept out of all bins */
+ assert(bin_find(m, m->top) == 0);
+ }
+
+ /* walking every chunk must not exceed the recorded footprints */
+ total = traverse_and_check(m);
+ assert(total <= m->footprint);
+ assert(m->footprint <= m->max_footprint);
+}
+#endif /* DEBUG */
+
+/* ----------------------------- statistics ------------------------------ */
+
+#if !NO_MALLINFO
+/* Compute mallinfo statistics by walking every segment of m.
+ Returns a zeroed struct if the lock cannot be taken or m is
+ uninitialized. */
+static struct mallinfo internal_mallinfo(mstate m) {
+ struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ if (!PREACTION(m)) {
+ check_malloc_state(m);
+ if (is_initialized(m)) {
+ size_t nfree = SIZE_T_ONE; /* top always free */
+ size_t mfree = m->topsize + TOP_FOOT_SIZE;
+ size_t sum = mfree;
+ msegmentptr s = &m->seg;
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ size_t sz = chunksize(q);
+ sum += sz;
+ if (!cinuse(q)) {
+ mfree += sz;
+ ++nfree;
+ }
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+
+ nm.arena = sum;
+ nm.ordblks = nfree;
+ /* hblkhd: footprint not accounted to any walked segment chunk */
+ nm.hblkhd = m->footprint - sum;
+ nm.usmblks = m->max_footprint;
+ nm.uordblks = m->footprint - mfree;
+ nm.fordblks = mfree;
+ nm.keepcost = m->topsize;
+ }
+
+ POSTACTION(m);
+ }
+ return nm;
+}
+#endif /* !NO_MALLINFO */
+
+/* Print footprint and in-use byte counts for m to stderr. */
+static void internal_malloc_stats(mstate m) {
+ if (!PREACTION(m)) {
+ size_t maxfp = 0;
+ size_t fp = 0;
+ size_t used = 0;
+ check_malloc_state(m);
+ if (is_initialized(m)) {
+ msegmentptr s = &m->seg;
+ maxfp = m->max_footprint;
+ fp = m->footprint;
+ /* start from footprint minus top, then subtract free chunks */
+ used = fp - (m->topsize + TOP_FOOT_SIZE);
+
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ if (!cinuse(q))
+ used -= chunksize(q);
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+ }
+
+ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
+ fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
+ fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
+
+ POSTACTION(m);
+ }
+}
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/*
+ Various forms of linking and unlinking are defined as macros. Even
+ the ones for trees, which are very long but have very short typical
+ paths. This is ugly but reduces reliance on inlining support of
+ compilers.
+*/
+
+/* Link a free chunk into a smallbin */
+/* P becomes the new front of bin I's circular list; the bin header B is
+ a pseudo-chunk, so only B->fd is range-checked before use. */
+#define insert_small_chunk(M, P, S) {\
+ bindex_t I = small_index(S);\
+ mchunkptr B = smallbin_at(M, I);\
+ mchunkptr F = B;\
+ assert(S >= MIN_CHUNK_SIZE);\
+ if (!smallmap_is_marked(M, I))\
+ mark_smallmap(M, I);\
+ else if (RTCHECK(ok_address(M, B->fd)))\
+ F = B->fd;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ B->fd = P;\
+ F->bk = P;\
+ P->fd = F;\
+ P->bk = B;\
+}
+
+/* Unlink a chunk from a smallbin */
+/* Clears the smallmap bit when P was the bin's only member; neighbors
+ that are the bin header itself are exempt from the address check. */
+#define unlink_small_chunk(M, P, S) {\
+ mchunkptr F = P->fd;\
+ mchunkptr B = P->bk;\
+ bindex_t I = small_index(S);\
+ assert(P != B);\
+ assert(P != F);\
+ assert(chunksize(P) == small_index2size(I));\
+ if (F == B)\
+ clear_smallmap(M, I);\
+ else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
+ (B == smallbin_at(M,I) || ok_address(M, B)))) {\
+ F->bk = B;\
+ B->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+}
+
+/* Unlink the first chunk from a smallbin */
+/* Specialization of unlink_small_chunk for P == B->fd (bin head B
+ already known), as used on the small-malloc fast path. */
+#define unlink_first_small_chunk(M, B, P, I) {\
+ mchunkptr F = P->fd;\
+ assert(P != B);\
+ assert(P != F);\
+ assert(chunksize(P) == small_index2size(I));\
+ if (B == F)\
+ clear_smallmap(M, I);\
+ else if (RTCHECK(ok_address(M, F))) {\
+ B->fd = F;\
+ F->bk = B;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+}
+
+/* Replace dv node, binning the old one */
+/* Used only when dvsize known to be small */
+/* The displaced dv (if any) goes back into a smallbin; P/S become the
+ new designated-victim chunk and size. */
+#define replace_dv(M, P, S) {\
+ size_t DVS = M->dvsize;\
+ if (DVS != 0) {\
+ mchunkptr DV = M->dv;\
+ assert(is_small(DVS));\
+ insert_small_chunk(M, DV, DVS);\
+ }\
+ M->dvsize = S;\
+ M->dv = P;\
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+/* X is placed either as a new tree node (descending by the size bits in
+ K, most-significant first) or appended to the fd/bk ring of an
+ existing node of exactly size S, in which case X->parent stays 0. */
+#define insert_large_chunk(M, X, S) {\
+ tbinptr* H;\
+ bindex_t I;\
+ compute_tree_index(S, I);\
+ H = treebin_at(M, I);\
+ X->index = I;\
+ X->child[0] = X->child[1] = 0;\
+ if (!treemap_is_marked(M, I)) {\
+ mark_treemap(M, I);\
+ *H = X;\
+ X->parent = (tchunkptr)H;\
+ X->fd = X->bk = X;\
+ }\
+ else {\
+ tchunkptr T = *H;\
+ size_t K = S << leftshift_for_tree_index(I);\
+ for (;;) {\
+ if (chunksize(T) != S) {\
+ tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+ K <<= 1;\
+ if (*C != 0)\
+ T = *C;\
+ else if (RTCHECK(ok_address(M, C))) {\
+ *C = X;\
+ X->parent = T;\
+ X->fd = X->bk = X;\
+ break;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ break;\
+ }\
+ }\
+ else {\
+ tchunkptr F = T->fd;\
+ if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
+ T->fd = F->bk = X;\
+ X->fd = F;\
+ X->bk = T;\
+ X->parent = 0;\
+ break;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ break;\
+ }\
+ }\
+ }\
+ }\
+}
+
+/*
+ Unlink steps:
+
+ 1. If x is a chained node, unlink it from its same-sized fd/bk links
+ and choose its bk node as its replacement.
+ 2. If x was the last node of its size, but not a leaf node, it must
+ be replaced with a leaf node (not merely one with an open left or
+ right), to make sure that lefts and rights of descendents
+ correspond properly to bit masks. We use the rightmost descendent
+ of x. We could use any other leaf, but this is easy to locate and
+ tends to counteract removal of leftmosts elsewhere, and so keeps
+ paths shorter than minimally guaranteed. This doesn't loop much
+ because on average a node in a tree is near the bottom.
+ 3. If x is the base of a chain (i.e., has parent links) relink
+ x's parent and children to x's replacement (or null if none).
+*/
+
+/* Remove X from its treebin; R is the replacement node (the next chain
+ member, or X's rightmost descendent, or 0). See the unlink-steps
+ comment above for the full rationale. */
+#define unlink_large_chunk(M, X) {\
+ tchunkptr XP = X->parent;\
+ tchunkptr R;\
+ if (X->bk != X) {\
+ tchunkptr F = X->fd;\
+ R = X->bk;\
+ if (RTCHECK(ok_address(M, F))) {\
+ F->bk = R;\
+ R->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else {\
+ tchunkptr* RP;\
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
+ tchunkptr* CP;\
+ while ((*(CP = &(R->child[1])) != 0) ||\
+ (*(CP = &(R->child[0])) != 0)) {\
+ R = *(RP = CP);\
+ }\
+ if (RTCHECK(ok_address(M, RP)))\
+ *RP = 0;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ }\
+ if (XP != 0) {\
+ tbinptr* H = treebin_at(M, X->index);\
+ if (X == *H) {\
+ if ((*H = R) == 0) \
+ clear_treemap(M, X->index);\
+ }\
+ else if (RTCHECK(ok_address(M, XP))) {\
+ if (XP->child[0] == X) \
+ XP->child[0] = R;\
+ else \
+ XP->child[1] = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ if (R != 0) {\
+ if (RTCHECK(ok_address(M, R))) {\
+ tchunkptr C0, C1;\
+ R->parent = XP;\
+ if ((C0 = X->child[0]) != 0) {\
+ if (RTCHECK(ok_address(M, C0))) {\
+ R->child[0] = C0;\
+ C0->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ if ((C1 = X->child[1]) != 0) {\
+ if (RTCHECK(ok_address(M, C1))) {\
+ R->child[1] = C1;\
+ C1->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+}
+
+/* Relays to large vs small bin operations */
+/* Note: these expand to an unbraced if/else, so callers must not place
+ them where a dangling else could attach. */
+#define insert_chunk(M, P, S)\
+ if (is_small(S)) insert_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+ if (is_small(S)) unlink_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+
+/* Relays to internal calls to malloc/free from realloc, memalign etc */
+/* When both mspaces and the global space are compiled in, dispatch on
+ whether m is the global state gm. */
+
+#if ONLY_MSPACES
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+ (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
+#define internal_free(m, mem)\
+ if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) dlmalloc(b)
+#define internal_free(m, mem) dlfree(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+/* ----------------------- Direct-mmapping chunks ----------------------- */
+
+/*
+ Directly mmapped chunks are set up with an offset to the start of
+ the mmapped region stored in the prev_foot field of the chunk. This
+ allows reconstruction of the required argument to MUNMAP when freed,
+ and also allows adjustment of the returned chunk to meet alignment
+ requirements (especially in memalign). There is also enough space
+ allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
+ the PINUSE bit so frees can be checked.
+*/
+
+/* Malloc using mmap */
+static void* mmap_alloc(mstate m, size_t nb) {
+ size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ if (mmsize > nb) { /* Check for wrap around 0 */
+ char* mm = (char*)(DIRECT_MMAP(mmsize));
+ if (mm != CMFAIL) {
+ /* offset re-aligns the payload; stored in prev_foot for munmap */
+ size_t offset = align_offset(chunk2mem(mm));
+ size_t psize = mmsize - offset - MMAP_FOOT_PAD;
+ mchunkptr p = (mchunkptr)(mm + offset);
+ p->prev_foot = offset | IS_MMAPPED_BIT;
+ (p)->head = (psize|CINUSE_BIT);
+ mark_inuse_foot(m, p, psize);
+ /* trailing fencepost plus zero-headed fake chunk close the region */
+ chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
+
+ if (mm < m->least_addr)
+ m->least_addr = mm;
+ if ((m->footprint += mmsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+ assert(is_aligned(chunk2mem(p)));
+ check_mmapped_chunk(m, p);
+ return chunk2mem(p);
+ }
+ }
+ return 0;
+}
+
+/* Realloc using mmap */
+static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
+ size_t oldsize = chunksize(oldp);
+ if (is_small(nb)) /* Can't shrink mmap regions below small size */
+ return 0;
+ /* Keep old chunk if big enough but not too big */
+ if (oldsize >= nb + SIZE_T_SIZE &&
+ (oldsize - nb) <= (mparams.granularity << 1))
+ return oldp;
+ else {
+ size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
+ size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
+ size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
+ CHUNK_ALIGN_MASK);
+ char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
+ oldmmsize, newmmsize, 1);
+ if (cp != CMFAIL) {
+ /* region may have moved; rebuild header and fenceposts in place */
+ mchunkptr newp = (mchunkptr)(cp + offset);
+ size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
+ newp->head = (psize|CINUSE_BIT);
+ mark_inuse_foot(m, newp, psize);
+ chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
+
+ if (cp < m->least_addr)
+ m->least_addr = cp;
+ if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+ check_mmapped_chunk(m, newp);
+ return newp;
+ }
+ }
+ return 0;
+}
+
+/* -------------------------- mspace management -------------------------- */
+
+/* Initialize top chunk and its size */
+static void init_top(mstate m, mchunkptr p, size_t psize) {
+ /* Ensure alignment */
+ size_t offset = align_offset(chunk2mem(p));
+ p = (mchunkptr)((char*)p + offset);
+ psize -= offset;
+
+ m->top = p;
+ m->topsize = psize;
+ /* top always has pinuse set; it is never preceded by a free chunk */
+ p->head = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
+ m->trim_check = mparams.trim_threshold; /* reset on each update */
+}
+
+/* Initialize bins for a new mstate that is otherwise zeroed out */
+static void init_bins(mstate m) {
+ /* Each smallbin header becomes a one-element circular list
+ pointing at itself; treebins are already zero. */
+ bindex_t idx;
+ for (idx = 0; idx < NSMALLBINS; ++idx) {
+ sbinptr head = smallbin_at(m, idx);
+ head->fd = head->bk = head;
+ }
+}
+
+#if PROCEED_ON_ERROR
+
+/* default corruption action */
+/* Abandon all tracked memory (leaking it) so the allocator can keep
+ serving requests after detected heap corruption. */
+static void reset_on_error(mstate m) {
+ int i;
+ ++malloc_corruption_error_count;
+ /* Reinitialize fields to forget about all memory */
+ m->smallbins = m->treebins = 0;
+ m->dvsize = m->topsize = 0;
+ m->seg.base = 0;
+ m->seg.size = 0;
+ m->seg.next = 0;
+ m->top = m->dv = 0;
+ for (i = 0; i < NTREEBINS; ++i)
+ *treebin_at(m, i) = 0;
+ init_bins(m);
+}
+#endif /* PROCEED_ON_ERROR */
+
+/* Allocate chunk and prepend remainder with chunk in successor base. */
+static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
+ size_t nb) {
+ mchunkptr p = align_as_chunk(newbase);
+ mchunkptr oldfirst = align_as_chunk(oldbase);
+ size_t psize = (char*)oldfirst - (char*)p;
+ mchunkptr q = chunk_plus_offset(p, nb);
+ size_t qsize = psize - nb;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+
+ assert((char*)oldfirst > (char*)q);
+ assert(pinuse(oldfirst));
+ assert(qsize >= MIN_CHUNK_SIZE);
+
+ /* consolidate remainder with first chunk of old base */
+ if (oldfirst == m->top) {
+ size_t tsize = m->topsize += qsize;
+ m->top = q;
+ q->head = tsize | PINUSE_BIT;
+ check_top_chunk(m, q);
+ }
+ else if (oldfirst == m->dv) {
+ size_t dsize = m->dvsize += qsize;
+ m->dv = q;
+ set_size_and_pinuse_of_free_chunk(q, dsize);
+ }
+ else {
+ /* plain free neighbor: absorb it, then bin the merged remainder */
+ if (!cinuse(oldfirst)) {
+ size_t nsize = chunksize(oldfirst);
+ unlink_chunk(m, oldfirst, nsize);
+ oldfirst = chunk_plus_offset(oldfirst, nsize);
+ qsize += nsize;
+ }
+ set_free_with_pinuse(q, qsize, oldfirst);
+ insert_chunk(m, q, qsize);
+ check_free_chunk(m, q);
+ }
+
+ check_malloced_chunk(m, chunk2mem(p), nb);
+ return chunk2mem(p);
+}
+
+
+/* Add a segment to hold a new noncontiguous region */
+static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
+ /* Determine locations and sizes of segment, fenceposts, old top */
+ char* old_top = (char*)m->top;
+ msegmentptr oldsp = segment_holding(m, old_top);
+ char* old_end = oldsp->base + oldsp->size;
+ size_t ssize = pad_request(sizeof(struct malloc_segment));
+ char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ size_t offset = align_offset(chunk2mem(rawsp));
+ char* asp = rawsp + offset;
+ /* if the carved record would leave less than a minimal chunk of old
+ top, let the record start at old top instead */
+ char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+ mchunkptr sp = (mchunkptr)csp;
+ msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+ mchunkptr tnext = chunk_plus_offset(sp, ssize);
+ mchunkptr p = tnext;
+ int nfences = 0;
+
+ /* reset top to new space */
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+
+ /* Set up segment record */
+ assert(is_aligned(ss));
+ set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
+ *ss = m->seg; /* Push current record */
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ m->seg.sflags = mmapped;
+ m->seg.next = ss;
+
+ /* Insert trailing fenceposts */
+ for (;;) {
+ mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
+ p->head = FENCEPOST_HEAD;
+ ++nfences;
+ if ((char*)(&(nextp->head)) < old_end)
+ p = nextp;
+ else
+ break;
+ }
+ assert(nfences >= 2);
+
+ /* Insert the rest of old top into a bin as an ordinary free chunk */
+ if (csp != old_top) {
+ mchunkptr q = (mchunkptr)old_top;
+ size_t psize = csp - old_top;
+ mchunkptr tn = chunk_plus_offset(q, psize);
+ set_free_with_pinuse(q, psize, tn);
+ insert_chunk(m, q, psize);
+ }
+
+ check_top_chunk(m, m->top);
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+/* Get memory from system using MORECORE or MMAP */
+static void* sys_alloc(mstate m, size_t nb) {
+ char* tbase = CMFAIL;
+ size_t tsize = 0;
+ flag_t mmap_flag = 0;
+
+ init_mparams();
+
+ /* Directly map large chunks */
+ if (use_mmap(m) && nb >= mparams.mmap_threshold) {
+ void* mem = mmap_alloc(m, nb);
+ if (mem != 0)
+ return mem;
+ }
+
+ /*
+ Try getting memory in any of three ways (in most-preferred to
+ least-preferred order):
+ 1. A call to MORECORE that can normally contiguously extend memory.
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
+ or main space is mmapped or a previous contiguous call failed)
+ 2. A call to MMAP new space (disabled if not HAVE_MMAP).
+ Note that under the default settings, if MORECORE is unable to
+ fulfill a request, and HAVE_MMAP is true, then mmap is
+ used as a noncontiguous system allocator. This is a useful backup
+ strategy for systems with holes in address spaces -- in this case
+ sbrk cannot contiguously expand the heap, but mmap may be able to
+ find space.
+ 3. A call to MORECORE that cannot usually contiguously extend memory.
+ (disabled if not HAVE_MORECORE)
+ */
+
+ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
+ char* br = CMFAIL;
+ msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
+ size_t asize = 0;
+ ACQUIRE_MORECORE_LOCK();
+
+ if (ss == 0) { /* First time through or recovery */
+ char* base = (char*)CALL_MORECORE(0);
+ if (base != CMFAIL) {
+ asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+ /* Adjust to end on a page boundary */
+ if (!is_page_aligned(base))
+ asize += (page_align((size_t)base) - (size_t)base);
+ /* Can't call MORECORE if size is negative when treated as signed */
+ if (asize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(asize))) == base) {
+ tbase = base;
+ tsize = asize;
+ }
+ }
+ }
+ else {
+ /* Subtract out existing available top space from MORECORE request. */
+ asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
+ /* Use mem here only if it did continuously extend old space */
+ if (asize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
+ tbase = br;
+ tsize = asize;
+ }
+ }
+
+ if (tbase == CMFAIL) { /* Cope with partial failure */
+ if (br != CMFAIL) { /* Try to use/extend the space we did get */
+ if (asize < HALF_MAX_SIZE_T &&
+ asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
+ size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
+ if (esize < HALF_MAX_SIZE_T) {
+ char* end = (char*)CALL_MORECORE(esize);
+ if (end != CMFAIL)
+ asize += esize;
+ else { /* Can't use; try to release */
+ CALL_MORECORE(-asize);
+ br = CMFAIL;
+ }
+ }
+ }
+ }
+ if (br != CMFAIL) { /* Use the space we did get */
+ tbase = br;
+ tsize = asize;
+ }
+ else
+ disable_contiguous(m); /* Don't try contiguous path in the future */
+ }
+
+ RELEASE_MORECORE_LOCK();
+ }
+
+ if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
+ size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
+ size_t rsize = granularity_align(req);
+ if (rsize > nb) { /* Fail if wraps around zero */
+ char* mp = (char*)(CALL_MMAP(rsize));
+ if (mp != CMFAIL) {
+ tbase = mp;
+ tsize = rsize;
+ mmap_flag = IS_MMAPPED_BIT;
+ }
+ }
+ }
+
+ if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
+ size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+ if (asize < HALF_MAX_SIZE_T) {
+ char* br = CMFAIL;
+ char* end = CMFAIL;
+ ACQUIRE_MORECORE_LOCK();
+ br = (char*)(CALL_MORECORE(asize));
+ end = (char*)(CALL_MORECORE(0));
+ RELEASE_MORECORE_LOCK();
+ if (br != CMFAIL && end != CMFAIL && br < end) {
+ size_t ssize = end - br;
+ if (ssize > nb + TOP_FOOT_SIZE) {
+ tbase = br;
+ tsize = ssize;
+ }
+ }
+ }
+ }
+
+ /* At this point tbase/tsize describe whatever new region we obtained
+ (mmap_flag records how), or tbase == CMFAIL on total failure. */
+ if (tbase != CMFAIL) {
+
+ if ((m->footprint += tsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+
+ if (!is_initialized(m)) { /* first-time initialization */
+ m->seg.base = m->least_addr = tbase;
+ m->seg.size = tsize;
+ m->seg.sflags = mmap_flag;
+ m->magic = mparams.magic;
+ init_bins(m);
+ if (is_global(m))
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+ else {
+ /* Offset top by embedded malloc_state */
+ mchunkptr mn = next_chunk(mem2chunk(m));
+ init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
+ }
+ }
+
+ else {
+ /* Try to merge with an existing segment */
+ msegmentptr sp = &m->seg;
+ while (sp != 0 && tbase != sp->base + sp->size)
+ sp = sp->next;
+ if (sp != 0 &&
+ !is_extern_segment(sp) &&
+ (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
+ segment_holds(sp, m->top)) { /* append */
+ sp->size += tsize;
+ init_top(m, m->top, m->topsize + tsize);
+ }
+ else {
+ if (tbase < m->least_addr)
+ m->least_addr = tbase;
+ /* otherwise look for a segment starting right after ours, so we
+ can prepend the new space to it */
+ sp = &m->seg;
+ while (sp != 0 && sp->base != tbase + tsize)
+ sp = sp->next;
+ if (sp != 0 &&
+ !is_extern_segment(sp) &&
+ (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
+ char* oldbase = sp->base;
+ sp->base = tbase;
+ sp->size += tsize;
+ return prepend_alloc(m, tbase, oldbase, nb);
+ }
+ else
+ add_segment(m, tbase, tsize, mmap_flag);
+ }
+ }
+
+ if (nb < m->topsize) { /* Allocate from new or extended top space */
+ size_t rsize = m->topsize -= nb;
+ mchunkptr p = m->top;
+ mchunkptr r = m->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+ check_top_chunk(m, m->top);
+ check_malloced_chunk(m, chunk2mem(p), nb);
+ return chunk2mem(p);
+ }
+ }
+
+ MALLOC_FAILURE_ACTION;
+ return 0;
+}
+
+/* ----------------------- system deallocation -------------------------- */
+
+/* Unmap and unlink any mmapped segments that don't contain used chunks */
+static size_t release_unused_segments(mstate m) {
+ size_t released = 0;
+ msegmentptr pred = &m->seg;
+ msegmentptr sp = pred->next;
+ while (sp != 0) {
+ char* base = sp->base;
+ size_t size = sp->size;
+ msegmentptr next = sp->next;
+ if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
+ mchunkptr p = align_as_chunk(base);
+ size_t psize = chunksize(p);
+ /* Can unmap if first chunk holds entire segment and not pinned */
+ if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
+ tchunkptr tp = (tchunkptr)p;
+ assert(segment_holds(sp, (char*)sp));
+ if (p == m->dv) {
+ m->dv = 0;
+ m->dvsize = 0;
+ }
+ else {
+ /* segment-sized free chunks are always large, hence tree-binned */
+ unlink_large_chunk(m, tp);
+ }
+ if (CALL_MUNMAP(base, size) == 0) {
+ released += size;
+ m->footprint -= size;
+ /* unlink obsoleted record */
+ sp = pred;
+ sp->next = next;
+ }
+ else { /* back out if cannot unmap */
+ insert_large_chunk(m, tp, psize);
+ }
+ }
+ }
+ pred = sp;
+ sp = next;
+ }
+ return released;
+}
+
+/* Give back unused memory above top to the system, keeping at least
+ pad bytes; returns 1 if any memory was released. */
+static int sys_trim(mstate m, size_t pad) {
+ size_t released = 0;
+ if (pad < MAX_REQUEST && is_initialized(m)) {
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+ if (m->topsize > pad) {
+ /* Shrink top space in granularity-size units, keeping at least one */
+ size_t unit = mparams.granularity;
+ size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+ SIZE_T_ONE) * unit;
+ msegmentptr sp = segment_holding(m, (char*)m->top);
+
+ if (!is_extern_segment(sp)) {
+ if (is_mmapped_segment(sp)) {
+ if (HAVE_MMAP &&
+ sp->size >= extra &&
+ !has_segment_link(m, sp)) { /* can't shrink if pinned */
+ size_t newsize = sp->size - extra;
+ /* Prefer mremap, fall back to munmap */
+ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
+ (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
+ released = extra;
+ }
+ }
+ }
+ else if (HAVE_MORECORE) {
+ if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
+ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
+ ACQUIRE_MORECORE_LOCK();
+ {
+ /* Make sure end of memory is where we last set it. */
+ char* old_br = (char*)(CALL_MORECORE(0));
+ if (old_br == sp->base + sp->size) {
+ char* rel_br = (char*)(CALL_MORECORE(-extra));
+ char* new_br = (char*)(CALL_MORECORE(0));
+ if (rel_br != CMFAIL && new_br < old_br)
+ released = old_br - new_br;
+ }
+ }
+ RELEASE_MORECORE_LOCK();
+ }
+ }
+
+ if (released != 0) {
+ sp->size -= released;
+ m->footprint -= released;
+ init_top(m, m->top, m->topsize - released);
+ check_top_chunk(m, m->top);
+ }
+ }
+
+ /* Unmap any unused mmapped segments */
+ if (HAVE_MMAP)
+ released += release_unused_segments(m);
+
+ /* On failure, disable autotrim to avoid repeated failed future calls */
+ if (released == 0)
+ m->trim_check = MAX_SIZE_T;
+ }
+
+ return (released != 0)? 1 : 0;
+}
+
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+static void* tmalloc_large(mstate m, size_t nb) {
+ tchunkptr v = 0;
+ size_t rsize = -nb; /* Unsigned negation */
+ tchunkptr t;
+ bindex_t idx;
+ compute_tree_index(nb, idx);
+
+ if ((t = *treebin_at(m, idx)) != 0) {
+ /* Traverse tree for this bin looking for node with size == nb */
+ size_t sizebits = nb << leftshift_for_tree_index(idx);
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
+ for (;;) {
+ tchunkptr rt;
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ v = t;
+ if ((rsize = trem) == 0)
+ break;
+ }
+ rt = t->child[1];
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
+ if (t == 0) {
+ t = rst; /* set t to least subtree holding sizes > nb */
+ break;
+ }
+ sizebits <<= 1;
+ }
+ }
+
+ if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
+ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
+ if (leftbits != 0) {
+ bindex_t i;
+ binmap_t leastbit = least_bit(leftbits);
+ compute_bit2idx(leastbit, i);
+ t = *treebin_at(m, i);
+ }
+ }
+
+ while (t != 0) { /* find smallest of tree or subtree */
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ t = leftmost_child(t);
+ }
+
+ /* If dv is a better fit, return 0 so malloc will use it */
+ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
+ if (RTCHECK(ok_address(m, v))) { /* split */
+ mchunkptr r = chunk_plus_offset(v, nb);
+ assert(chunksize(v) == rsize + nb);
+ if (RTCHECK(ok_next(v, r))) {
+ unlink_large_chunk(m, v);
+ /* remainder too small to stand alone: hand out the whole chunk */
+ if (rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ insert_chunk(m, r, rsize);
+ }
+ return chunk2mem(v);
+ }
+ }
+ CORRUPTION_ERROR_ACTION(m);
+ }
+ return 0;
+}
+
+/* allocate a small request from the best fitting chunk in a treebin */
/* Allocate a small request from the best fitting chunk in a treebin.
   Only called when no smallbin chunk and no adequate dv is available;
   m->treemap must be nonzero.  The remainder, if large enough, becomes
   the new designated victim (dv). */
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);   /* smallest non-empty treebin */
  compute_bit2idx(leastbit, i);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  /* Walk down to the smallest chunk in this tree */
  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE)    /* remainder too small to split off */
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);     /* remainder becomes the new dv */
      }
      return chunk2mem(v);
    }
  }

  CORRUPTION_ERROR_ACTION(m);
  return 0;
}
+
+/* --------------------------- realloc support --------------------------- */
+
/* Resize the allocation at oldmem to hold at least bytes.  Tries, in
   order: mmap resize (for mmapped chunks), shrinking in place, growing
   into the adjacent top chunk; otherwise falls back to
   malloc-copy-free.  Returns the (possibly moved) user pointer, or 0 on
   failure or if the lock cannot be taken. */
static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
    return 0;
  }
  if (!PREACTION(m)) {
    mchunkptr oldp = mem2chunk(oldmem);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    void* extra = 0;     /* trailing remainder to free after unlocking */

    /* Try to either shrink or extend into top. Else malloc-copy-free */

    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
                ok_next(oldp, next) && ok_pinuse(next))) {
      size_t nb = request2size(bytes);
      if (is_mmapped(oldp))
        newp = mmap_resize(m, oldp, nb);
      else if (oldsize >= nb) { /* already big enough */
        size_t rsize = oldsize - nb;
        newp = oldp;
        if (rsize >= MIN_CHUNK_SIZE) {
          /* split off the tail and hand it to internal_free below */
          mchunkptr remainder = chunk_plus_offset(newp, nb);
          set_inuse(m, newp, nb);
          set_inuse(m, remainder, rsize);
          extra = chunk2mem(remainder);
        }
      }
      else if (next == m->top && oldsize + m->topsize > nb) {
        /* Expand into top */
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(oldp, nb);
        set_inuse(m, oldp, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = oldp;
      }
    }
    else {
      USAGE_ERROR_ACTION(m, oldmem);
      POSTACTION(m);
      return 0;
    }

    POSTACTION(m);   /* release the lock before any nested free/malloc */

    if (newp != 0) {
      if (extra != 0) {
        internal_free(m, extra);
      }
      check_inuse_chunk(m, newp);
      return chunk2mem(newp);
    }
    else {
      /* In-place resize failed: allocate fresh, copy the payload, free old */
      void* newmem = internal_malloc(m, bytes);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
        internal_free(m, oldmem);
      }
      return newmem;
    }
  }
  return 0;
}
+
+/* --------------------------- memalign support -------------------------- */
+
/* Allocate bytes of memory whose address is a multiple of alignment.
   alignment is rounded up to a power of two of at least MIN_CHUNK_SIZE.
   Over-allocates, then gives back the misaligned leader and any spare
   trailer as free chunks.  Returns 0 on failure. */
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
    return internal_malloc(m, bytes);
  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
    alignment = MIN_CHUNK_SIZE;
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
    size_t a = MALLOC_ALIGNMENT << 1;
    while (a < alignment) a <<= 1;
    alignment = a;
  }

  if (bytes >= MAX_REQUEST - alignment) {
    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
      MALLOC_FAILURE_ACTION;
    }
  }
  else {
    /* Request enough slack to guarantee an aligned spot with room for a
       MIN_CHUNK_SIZE leader before it. */
    size_t nb = request2size(bytes);
    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    char* mem = (char*)internal_malloc(m, req);
    if (mem != 0) {
      void* leader = 0;     /* freed after unlock: space before aligned spot */
      void* trailer = 0;    /* freed after unlock: space after the payload */
      mchunkptr p = mem2chunk(mem);

      if (PREACTION(m)) return 0;
      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
        /*
          Find an aligned spot inside chunk.  Since we need to give
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
          the first calculation places us at a spot with less than
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
          We've allocated enough total room so that this is always
          possible.
        */
        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
                                                       alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
          br : br+alignment;
        mchunkptr newp = (mchunkptr)pos;
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;

        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
          newp->prev_foot = p->prev_foot + leadsize;
          newp->head = (newsize|CINUSE_BIT);
        }
        else { /* Otherwise, give back leader, use the rest */
          set_inuse(m, newp, newsize);
          set_inuse(m, p, leadsize);
          leader = chunk2mem(p);
        }
        p = newp;
      }

      /* Give back spare room at the end */
      if (!is_mmapped(p)) {
        size_t size = chunksize(p);
        if (size > nb + MIN_CHUNK_SIZE) {
          size_t remainder_size = size - nb;
          mchunkptr remainder = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, remainder, remainder_size);
          trailer = chunk2mem(remainder);
        }
      }

      assert (chunksize(p) >= nb);
      assert((((size_t)(chunk2mem(p))) % alignment) == 0);
      check_inuse_chunk(m, p);
      POSTACTION(m);        /* unlock before the nested frees below */
      if (leader != 0) {
        internal_free(m, leader);
      }
      if (trailer != 0) {
        internal_free(m, trailer);
      }
      return chunk2mem(p);
    }
  }
  return 0;
}
+
+/* ------------------------ comalloc/coalloc support --------------------- */
+
/* Allocate n_elements chunks from one contiguous region and return an
   array of pointers to them (either the caller-supplied `chunks` array
   or one carved from the tail of the same region).  Backs both
   independent_calloc and independent_comalloc. */
static void** ialloc(mstate m,
                     size_t n_elements,
                     size_t* sizes,
                     int opts,
                     void* chunks[]) {
  /*
    This provides common support for independent_X routines, handling
    all of the combinations that can result.

    The opts arg has:
    bit 0 set if all elements are same size (using sizes[0])
    bit 1 set if elements should be zeroed
  */

  size_t    element_size;   /* chunksize of each element, if all same */
  size_t    contents_size;  /* total size of elements */
  size_t    array_size;     /* request size of pointer array */
  void*     mem;            /* malloced aggregate space */
  mchunkptr p;              /* corresponding chunk */
  size_t    remainder_size; /* remaining bytes while splitting */
  void**    marray;         /* either "chunks" or malloced ptr array */
  mchunkptr array_chunk;    /* chunk for malloced ptr array */
  flag_t    was_enabled;    /* to disable mmap */
  size_t    size;
  size_t    i;

  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (void**)internal_malloc(m, 0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(void*)));
  }

  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }

  size = contents_size + array_size;

  /*
     Allocate the aggregate chunk.  First disable direct-mmapping so
     malloc won't use it, since we would not be able to later
     free/realloc space internal to a segregated mmap region.
  */
  was_enabled = use_mmap(m);
  disable_mmap(m);
  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  if (was_enabled)
    enable_mmap(m);
  if (mem == 0)
    return 0;

  if (PREACTION(m)) return 0;
  p = mem2chunk(mem);
  remainder_size = chunksize(p);

  assert(!is_mmapped(p));

  if (opts & 0x2) {       /* optionally clear the elements */
    memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  }

  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    size_t  array_chunk_size;
    array_chunk = chunk_plus_offset(p, contents_size);
    array_chunk_size = remainder_size - contents_size;
    marray = (void**) (chunk2mem(array_chunk));
    set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
    remainder_size = contents_size;
  }

  /* split out elements: each loop iteration stamps one chunk header */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_size_and_pinuse_of_inuse_chunk(m, p, size);
      p = chunk_plus_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
      break;
    }
  }

#if DEBUG
  if (marray != chunks) {
    /* final element must have exactly exhausted chunk */
    if (element_size != 0) {
      assert(remainder_size == element_size);
    }
    else {
      assert(remainder_size == request2size(sizes[i]));
    }
    check_inuse_chunk(m, mem2chunk(marray));
  }
  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(m, mem2chunk(marray[i]));

#endif /* DEBUG */

  POSTACTION(m);
  return marray;
}
+
+
+/* -------------------------- public routines ---------------------------- */
+
+#if !ONLY_MSPACES
+
/* Allocate at least `bytes` bytes from the global malloc state gm.
   Returns a chunk-aligned user pointer, or 0 on failure (or if the
   PREACTION lock cannot be taken). */
void* dlmalloc(size_t bytes) {
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it

     The ugly goto's here ensure that postaction occurs along all paths.
  */

  if (!PREACTION(gm)) {
    void* mem;
    size_t nb;    /* padded request size (or MAX_SIZE_T to force failure) */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }

      else if (nb > gm->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);   /* remainder becomes new dv */
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    /* Fall through: serve from dv, then top, then the system */
    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);
    return mem;
  }

  return 0;
}
+
/* Free the allocation at mem (a no-op for mem == 0).  Coalesces with
   adjacent free chunks and rebins, with special handling for top, dv,
   and mmapped chunks. */
void dlfree(void* mem) {
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */

  if (mem != 0) {
    mchunkptr p  = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if ((prevsize & IS_MMAPPED_BIT) != 0) {
            /* mmapped chunk: return the whole mapping to the system */
            prevsize &= ~IS_MMAPPED_BIT;
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* next is in use, so merged chunk stays as dv */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}
+
+void* dlcalloc(size_t n_elements, size_t elem_size) {
+ void* mem;
+ size_t req = 0;
+ if (n_elements != 0) {
+ req = n_elements * elem_size;
+ if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+ (req / n_elements != elem_size))
+ req = MAX_SIZE_T; /* force downstream failure on overflow */
+ }
+ mem = dlmalloc(req);
+ if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+ memset(mem, 0, req);
+ return mem;
+}
+
+void* dlrealloc(void* oldmem, size_t bytes) {
+ if (oldmem == 0)
+ return dlmalloc(bytes);
+#ifdef REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0) {
+ dlfree(oldmem);
+ return 0;
+ }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+ else {
+#if ! FOOTERS
+ mstate m = gm;
+#else /* FOOTERS */
+ mstate m = get_mstate_for(mem2chunk(oldmem));
+ if (!ok_magic(m)) {
+ USAGE_ERROR_ACTION(m, oldmem);
+ return 0;
+ }
+#endif /* FOOTERS */
+ return internal_realloc(m, oldmem, bytes);
+ }
+}
+
/* Allocate bytes of memory aligned to `alignment`; forwards to
   internal_memalign on the global malloc state. */
void* dlmemalign(size_t alignment, size_t bytes) {
  return internal_memalign(gm, alignment, bytes);
}
+
/* Allocate n_elements independent, cleared chunks of elem_size bytes
   each via ialloc (opts 3 = same-size + zeroed). */
void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                                 void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}
+
/* Allocate n_elements independent chunks with per-element sizes[] via
   ialloc (opts 0 = individual sizes, not cleared). */
void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                                   void* chunks[]) {
  return ialloc(gm, n_elements, sizes, 0, chunks);
}
+
+void* dlvalloc(size_t bytes) {
+ size_t pagesz;
+ init_mparams();
+ pagesz = mparams.page_size;
+ return dlmemalign(pagesz, bytes);
+}
+
+void* dlpvalloc(size_t bytes) {
+ size_t pagesz;
+ init_mparams();
+ pagesz = mparams.page_size;
+ return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
+}
+
+int dlmalloc_trim(size_t pad) {
+ int result = 0;
+ if (!PREACTION(gm)) {
+ result = sys_trim(gm, pad);
+ POSTACTION(gm);
+ }
+ return result;
+}
+
/* Return the number of bytes currently obtained from the system. */
size_t dlmalloc_footprint(void) {
  return gm->footprint;
}
+
/* Return the maximum number of bytes ever obtained from the system. */
size_t dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}
+
#if !NO_MALLINFO
/* Return SVID/XPG-style mallinfo statistics for the global state. */
struct mallinfo dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */
+
+void dlmalloc_stats() {
+ internal_malloc_stats(gm);
+}
+
+size_t dlmalloc_usable_size(void* mem) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ if (cinuse(p))
+ return chunksize(p) - overhead_for(p);
+ }
+ return 0;
+}
+
/* Adjust a tunable malloc parameter (mallopt interface); returns the
   result of change_mparam (nonzero on success). */
int dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
+
+#endif /* !ONLY_MSPACES */
+
+/* ----------------------------- user mspaces ---------------------------- */
+
+#if MSPACES
+
/* Carve a malloc_state out of the start of [tbase, tbase+tsize) and
   initialize it; the rest of the block becomes the initial top chunk.
   Assumes mparams has already been initialized by the caller. */
static mstate init_user_mstate(char* tbase, size_t tsize) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  memset(m, 0, msize);
  INITIAL_LOCK(&m->mutex);
  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);   /* state chunk stays in use */
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->mflags = mparams.default_mflags;
  disable_contiguous(m);   /* user space is never assumed sbrk-contiguous */
  init_bins(m);
  mn = next_chunk(mem2chunk(m));
  /* Everything after the state chunk, minus footer overhead, is top */
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}
+
+mspace create_mspace(size_t capacity, int locked) {
+ mstate m = 0;
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ init_mparams(); /* Ensure pagesize etc initialized */
+
+ if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+ size_t rs = ((capacity == 0)? mparams.granularity :
+ (capacity + TOP_FOOT_SIZE + msize));
+ size_t tsize = granularity_align(rs);
+ char* tbase = (char*)(CALL_MMAP(tsize));
+ if (tbase != CMFAIL) {
+ m = init_user_mstate(tbase, tsize);
+ m->seg.sflags = IS_MMAPPED_BIT;
+ set_lock(m, locked);
+ }
+ }
+ return (mspace)m;
+}
+
+mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
+ mstate m = 0;
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ init_mparams(); /* Ensure pagesize etc initialized */
+
+ if (capacity > msize + TOP_FOOT_SIZE &&
+ capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+ m = init_user_mstate((char*)base, capacity);
+ m->seg.sflags = EXTERN_BIT;
+ set_lock(m, locked);
+ }
+ return (mspace)m;
+}
+
+size_t destroy_mspace(mspace msp) {
+ size_t freed = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ msegmentptr sp = &ms->seg;
+ while (sp != 0) {
+ char* base = sp->base;
+ size_t size = sp->size;
+ flag_t flag = sp->sflags;
+ sp = sp->next;
+ if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
+ CALL_MUNMAP(base, size) == 0)
+ freed += size;
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return freed;
+}
+
+/*
+ mspace versions of routines are near-clones of the global
+ versions. This is not so nice but better than the alternatives.
+*/
+
+
/* Allocate at least `bytes` bytes from the given mspace.  Near-clone
   of dlmalloc operating on ms instead of gm; see dlmalloc for the full
   algorithm description.  Returns 0 on failure or invalid mspace. */
void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (!PREACTION(ms)) {
    void* mem;
    size_t nb;   /* padded request size (or MAX_SIZE_T to force failure) */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);   /* remainder becomes new dv */
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
    }

    /* Fall through: serve from dv, then top, then the system */
    if (nb <= ms->dvsize) {
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    else if (nb < ms->topsize) { /* Split top */
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(ms, nb);

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}
+
/* Free the allocation at mem back to its mspace (a no-op for mem == 0).
   Near-clone of dlfree; with FOOTERS the owning state is recovered from
   the chunk footer and msp is unused for the lookup. */
void mspace_free(mspace msp, void* mem) {
  if (mem != 0) {
    mchunkptr p  = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if ((prevsize & IS_MMAPPED_BIT) != 0) {
            /* mmapped chunk: return the whole mapping to the system */
            prevsize &= ~IS_MMAPPED_BIT;
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* next is in use, so merged chunk stays as dv */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
}
+
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
+ void* mem;
+ size_t req = 0;
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ if (n_elements != 0) {
+ req = n_elements * elem_size;
+ if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+ (req / n_elements != elem_size))
+ req = MAX_SIZE_T; /* force downstream failure on overflow */
+ }
+ mem = internal_malloc(ms, req);
+ if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+ memset(mem, 0, req);
+ return mem;
+}
+
+void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
+ if (oldmem == 0)
+ return mspace_malloc(msp, bytes);
+#ifdef REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0) {
+ mspace_free(msp, oldmem);
+ return 0;
+ }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+ else {
+#if FOOTERS
+ mchunkptr p = mem2chunk(oldmem);
+ mstate ms = get_mstate_for(p);
+#else /* FOOTERS */
+ mstate ms = (mstate)msp;
+#endif /* FOOTERS */
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return internal_realloc(ms, oldmem, bytes);
+ }
+}
+
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return internal_memalign(ms, alignment, bytes);
+}
+
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]) {
+ size_t sz = elem_size; /* serves as 1-element array */
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return ialloc(ms, n_elements, &sz, 3, chunks);
+}
+
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return ialloc(ms, n_elements, sizes, 0, chunks);
+}
+
+int mspace_trim(mspace msp, size_t pad) {
+ int result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ if (!PREACTION(ms)) {
+ result = sys_trim(ms, pad);
+ POSTACTION(ms);
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+void mspace_malloc_stats(mspace msp) {
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ internal_malloc_stats(ms);
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+}
+
+size_t mspace_footprint(mspace msp) {
+ size_t result;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ result = ms->footprint;
+ }
+ USAGE_ERROR_ACTION(ms,ms);
+ return result;
+}
+
+
+size_t mspace_max_footprint(mspace msp) {
+ size_t result;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ result = ms->max_footprint;
+ }
+ USAGE_ERROR_ACTION(ms,ms);
+ return result;
+}
+
+
#if !NO_MALLINFO
/* Return mallinfo statistics for msp.  NOTE(review): on a failed magic
   check this fires the usage-error action but still calls
   internal_mallinfo on the bad pointer — matches upstream behavior. */
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */
+
/* Adjust a tunable malloc parameter; parameters are global, so this is
   identical to dlmallopt and ignores any particular mspace. */
int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
+
+#endif /* MSPACES */
+
+/* -------------------- Alternative MORECORE functions ------------------- */
+
+/*
+ Guidelines for creating a custom version of MORECORE:
+
+ * For best performance, MORECORE should allocate in multiples of pagesize.
+ * MORECORE may allocate more memory than requested. (Or even less,
+ but this will usually result in a malloc failure.)
+ * MORECORE must not allocate memory when given argument zero, but
+ instead return one past the end address of memory from previous
+ nonzero call.
+ * For best performance, consecutive calls to MORECORE with positive
+ arguments should return increasing addresses, indicating that
+ space has been contiguously extended.
+ * Even though consecutive calls to MORECORE need not return contiguous
+ addresses, it must be OK for malloc'ed chunks to span multiple
+ regions in those cases where they do happen to be contiguous.
+ * MORECORE need not handle negative arguments -- it may instead
+ just return MFAIL when given negative arguments.
+ Negative arguments are always multiples of pagesize. MORECORE
+ must not misinterpret negative args as large positive unsigned
+ args. You can suppress all such calls from even occurring by defining
+ MORECORE_CANNOT_TRIM,
+
+ As an example alternative MORECORE, here is a custom allocator
+ kindly contributed for pre-OSX macOS. It uses virtually but not
+ necessarily physically contiguous non-paged memory (locked in,
+ present and won't get swapped out). You can use it by uncommenting
+ this section, adding some #includes, and setting up the appropriate
+ defines above:
+
+ #define MORECORE osMoreCore
+
+ There is also a shutdown routine that should somehow be called for
+ cleanup upon program exit.
+
+ #define MAX_POOL_ENTRIES 100
+ #define MINIMUM_MORECORE_SIZE (64 * 1024U)
+ static int next_os_pool;
+ void *our_os_pools[MAX_POOL_ENTRIES];
+
+ void *osMoreCore(int size)
+ {
+ void *ptr = 0;
+ static void *sbrk_top = 0;
+
+ if (size > 0)
+ {
+ if (size < MINIMUM_MORECORE_SIZE)
+ size = MINIMUM_MORECORE_SIZE;
+ if (CurrentExecutionLevel() == kTaskLevel)
+ ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+ if (ptr == 0)
+ {
+ return (void *) MFAIL;
+ }
+ // save ptrs so they can be freed during cleanup
+ our_os_pools[next_os_pool] = ptr;
+ next_os_pool++;
+ ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+ sbrk_top = (char *) ptr + size;
+ return ptr;
+ }
+ else if (size < 0)
+ {
+ // we don't currently support shrink behavior
+ return (void *) MFAIL;
+ }
+ else
+ {
+ return sbrk_top;
+ }
+ }
+
+ // cleanup any allocated memory pools
+ // called as last thing before shutting down driver
+
+ void osCleanupMem(void)
+ {
+ void **ptr;
+
+ for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+ if (*ptr)
+ {
+ PoolDeallocate(*ptr);
+ *ptr = 0;
+ }
+ }
+
+*/
+
+
+/* -----------------------------------------------------------------------
+History:
+ V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
+ * Add max_footprint functions
+ * Ensure all appropriate literals are size_t
+ * Fix conditional compilation problem for some #define settings
+ * Avoid concatenating segments with the one provided
+ in create_mspace_with_base
+ * Rename some variables to avoid compiler shadowing warnings
+ * Use explicit lock initialization.
+ * Better handling of sbrk interference.
+ * Simplify and fix segment insertion, trimming and mspace_destroy
+ * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
+ * Thanks especially to Dennis Flanagan for help on these.
+
+ V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
+ * Fix memalign brace error.
+
+ V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
+ * Fix improper #endif nesting in C++
+ * Add explicit casts needed for C++
+
+ V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
+ * Use trees for large bins
+ * Support mspaces
+ * Use segments to unify sbrk-based and mmap-based system allocation,
+ removing need for emulation on most platforms without sbrk.
+ * Default safety checks
+ * Optional footer checks. Thanks to William Robertson for the idea.
+ * Internal code refactoring
+ * Incorporate suggestions and platform-specific changes.
+ Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
+ Aaron Bachmann, Emery Berger, and others.
+ * Speed up non-fastbin processing enough to remove fastbins.
+ * Remove useless cfree() to avoid conflicts with other apps.
+ * Remove internal memcpy, memset. Compilers handle builtins better.
+ * Remove some options that no one ever used and rename others.
+
+ V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+ * Fix malloc_state bitmap array misdeclaration
+
+ V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
+ * Allow tuning of FIRST_SORTED_BIN_SIZE
+ * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+ * Better detection and support for non-contiguousness of MORECORE.
+ Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+ * Bypass most of malloc if no frees. Thanks To Emery Berger.
+ * Fix freeing of old top non-contiguous chunk im sysmalloc.
+ * Raised default trim and map thresholds to 256K.
+ * Fix mmap-related #defines. Thanks to Lubos Lunak.
+ * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+ * Branch-free bin calculation
+ * Default trim and mmap thresholds now 256K.
+
+ V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
+ * Introduce independent_comalloc and independent_calloc.
+ Thanks to Michael Pachos for motivation and help.
+ * Make optional .h file available
+ * Allow > 2GB requests on 32bit systems.
+ * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
+ Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
+ and Anonymous.
+ * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+ helping test this.)
+ * memalign: check alignment arg
+ * realloc: don't try to shift chunks backwards, since this
+ leads to more fragmentation in some programs and doesn't
+ seem to help in any others.
+ * Collect all cases in malloc requiring system memory into sysmalloc
+ * Use mmap as backup to sbrk
+ * Place all internal state in malloc_state
+ * Introduce fastbins (although similar to 2.5.1)
+ * Many minor tunings and cosmetic improvements
+ * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+ * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+ Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
+ * Include errno.h to support default failure action.
+
+ V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
+ * return null for negative arguments
+ * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
+ * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+ (e.g. WIN32 platforms)
+ * Cleanup header file inclusion for WIN32 platforms
+ * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+ * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+ memory allocation routines
+ * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+ * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+ usage of 'assert' in non-WIN32 code
+ * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+ avoid infinite loop
+ * Always call 'fREe()' rather than 'free()'
+
+ V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
+ * Fixed ordering problem with boundary-stamping
+
+ V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
+ * Added pvalloc, as recommended by H.J. Liu
+ * Added 64bit pointer support mainly from Wolfram Gloger
+ * Added anonymously donated WIN32 sbrk emulation
+ * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
+ * malloc_extend_top: fix mask error that caused wastage after
+ foreign sbrks
+ * Add linux mremap support code from HJ Liu
+
+ V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
+ * Integrated most documentation with the code.
+ * Add support for mmap, with help from
+ Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ * Use last_remainder in more cases.
+ * Pack bins using idea from colin@nyx10.cs.du.edu
+ * Use ordered bins instead of best-fit threshhold
+ * Eliminate block-local decls to simplify tracing and debugging.
+ * Support another case of realloc via move into top
+ * Fix error occuring when initial sbrk_base not word-aligned.
+ * Rely on page size for units instead of SBRK_UNIT to
+ avoid surprises about sbrk alignment conventions.
+ * Add mallinfo, mallopt. Thanks to Raymond Nijssen
+ (raymond@es.ele.tue.nl) for the suggestion.
+ * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
+ * More precautions for cases where other routines call sbrk,
+ courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ * Added macros etc., allowing use in linux libc from
+ H.J. Lu (hjl@gnu.ai.mit.edu)
+ * Inverted this history list
+
+ V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
+ * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
+ * Removed all preallocation code since under current scheme
+ the work required to undo bad preallocations exceeds
+ the work saved in good cases for most test programs.
+ * No longer use return list or unconsolidated bins since
+ no scheme using them consistently outperforms those that don't
+ given above changes.
+ * Use best fit for very large chunks to prevent some worst-cases.
+ * Added some support for debugging
+
+ V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
+ * Removed footers when chunks are in use. Thanks to
+ Paul Wilson (wilson@cs.texas.edu) for the suggestion.
+
+ V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
+ * Added malloc_trim, with help from Wolfram Gloger
+ (wmglo@Dent.MED.Uni-Muenchen.DE).
+
+ V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
+
+ V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
+ * realloc: try to expand in both directions
+ * malloc: swap order of clean-bin strategy;
+ * realloc: only conditionally expand backwards
+ * Try not to scavenge used bins
+ * Use bin counts as a guide to preallocation
+ * Occasionally bin return list chunks in first scan
+ * Add a few optimizations from colin@nyx10.cs.du.edu
+
+ V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
+ * faster bin computation & slightly different binning
+ * merged all consolidations to one part of malloc proper
+ (eliminating old malloc_find_space & malloc_clean_bin)
+ * Scan 2 returns chunks (not just 1)
+ * Propagate failure in realloc if malloc returns 0
+ * Add stuff to allow compilation on non-ANSI compilers
+ from kpv@research.att.com
+
+ V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
+ * removed potential for odd address access in prev_chunk
+ * removed dependency on getpagesize.h
+ * misc cosmetics and a bit more internal documentation
+ * anticosmetics: mangled names in macros to evade debugger strangeness
+ * tested on sparc, hp-700, dec-mips, rs6000
+ with gcc & native cc (hp, dec only) allowing
+ Detlefs & Zorn comparison study (in SIGPLAN Notices.)
+
+ Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
+ * Based loosely on libg++-1.2X malloc. (It retains some of the overall
+ structure of old version, but most details differ.)
+
+*/
diff --git a/viengoos/malloc.h b/viengoos/malloc.h
new file mode 100644
index 0000000..1bb3ed8
--- /dev/null
+++ b/viengoos/malloc.h
@@ -0,0 +1,529 @@
+/*
+ Default header file for malloc-2.8.x, written by Doug Lea
+ and released to the public domain, as explained at
+ http://creativecommons.org/licenses/publicdomain.
+
+ last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee)
+
+ This header is for ANSI C/C++ only. You can set any of
+ the following #defines before including:
+
+ * If USE_DL_PREFIX is defined, it is assumed that malloc.c
+ was also compiled with this option, so all routines
+ have names starting with "dl".
+
+ * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
+ file will be #included AFTER <malloc.h>. This is needed only if
+ your system defines a struct mallinfo that is incompatible with the
+ standard one declared here. Otherwise, you can include this file
+ INSTEAD of your system <malloc.h>. At least on ANSI, all
+ declarations should be compatible with system versions
+
+ * If MSPACES is defined, declarations for mspace versions are included.
+*/
+
+#ifndef MALLOC_280_H
+#define MALLOC_280_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h> /* for size_t */
+
+#if !ONLY_MSPACES
+
+#ifndef USE_DL_PREFIX
+#define dlcalloc calloc
+#define dlfree free
+#define dlmalloc malloc
+#define dlmemalign memalign
+#define dlrealloc realloc
+#define dlvalloc valloc
+#define dlpvalloc pvalloc
+#define dlmallinfo mallinfo
+#define dlmallopt mallopt
+#define dlmalloc_trim malloc_trim
+#define dlmalloc_stats malloc_stats
+#define dlmalloc_usable_size malloc_usable_size
+#define dlmalloc_footprint malloc_footprint
+#define dlindependent_calloc independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#endif /* USE_DL_PREFIX */
+
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or
+ null if no space is available, in which case errno is set to ENOMEM
+ on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+ systems.) Note that size_t is an unsigned type, so calls with
+ arguments that would be negative if signed are interpreted as
+ requests for huge amounts of space, which will often fail. The
+ maximum supported value of n differs across systems, but is in all
+ cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+ free(void* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. If p was not malloced or already
+ freed, free(p) will by default cause the current program to abort.
+*/
+void dlfree(void*);
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+ realloc(void* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p in most cases when possible, otherwise it
+ employs the equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ if n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. realloc with a size
+ argument of zero (re)allocates a minimum-sized chunk.
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. None of these are used in this malloc,
+ so setting them has no effect. But this malloc also supports other
+ options in mallopt:
+
+ Symbol param # default allowed param values
+ M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
+ M_GRANULARITY -2 page size any power of 2 >= page size
+ M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
+
+#define M_TRIM_THRESHOLD (-1)
+#define M_GRANULARITY (-2)
+#define M_MMAP_THRESHOLD (-3)
+
+
+/*
+ malloc_footprint();
+ Returns the number of bytes obtained from the system. The total
+ number of bytes allocated by malloc, realloc etc., is less than this
+ value. Unlike mallinfo, this function returns only a precomputed
+ result, so can be called frequently to monitor memory consumption.
+ Even if locks are otherwise defined, this function does not use them,
+ so results might not be up to date.
+*/
+size_t dlmalloc_footprint();
+
+#if !NO_MALLINFO
+/*
+ mallinfo()
+ Returns (by copy) a struct containing various summary statistics:
+
+ arena: current total non-mmapped bytes allocated from system
+ ordblks: the number of free chunks
+ smblks: always zero.
+ hblks: current number of mmapped regions
+ hblkhd: total bytes held in mmapped regions
+ usmblks: the maximum total allocated space. This will be greater
+ than current total if trimming has occurred.
+ fsmblks: always zero
+ uordblks: current total allocated space (normal or mmapped)
+ fordblks: total free space
+ keepcost: the maximum number of bytes that could ideally be released
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
+
+ Because these fields are ints, but internal bookkeeping may
+ be kept as longs, the reported values may wrap around zero and
+ thus be inaccurate.
+*/
+#ifndef HAVE_USR_INCLUDE_MALLOC_H
+#ifndef _MALLOC_H
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+struct mallinfo {
+ MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
+ MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
+ MALLINFO_FIELD_TYPE smblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
+ MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
+ MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+ MALLINFO_FIELD_TYPE fordblks; /* total free space */
+ MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+#endif /* _MALLOC_H */
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+
+struct mallinfo dlmallinfo(void);
+#endif /* NO_MALLINFO */
+
+/*
+ independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+ independent_calloc is similar to calloc, but instead of returning a
+ single cleared space, it returns an array of pointers to n_elements
+ independent elements that can hold contents of size elem_size, each
+ of which starts out cleared, and can be independently freed,
+ realloc'ed etc. The elements are guaranteed to be adjacently
+ allocated (this is not guaranteed to occur with multiple callocs or
+ mallocs), which may also improve cache locality in some
+ applications.
+
+ The "chunks" argument is optional (i.e., may be null, which is
+ probably the most typical usage). If it is null, the returned array
+ is itself dynamically allocated and should also be freed when it is
+ no longer needed. Otherwise, the chunks array must be of at least
+ n_elements in length. It is filled in with the pointers to the
+ chunks.
+
+ In either case, independent_calloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and "chunks"
+ is null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use regular calloc and assign pointers into this
+ space to represent elements. (In this case though, you cannot
+ independently free elements.)
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+ pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+ for (i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use a single regular malloc, and assign pointers at
+ particular offsets in the aggregate space. (In this case though, you
+ cannot independently free elements.)
+
+ independent_comalloc differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+ struct Head { ... }
+ struct Foot { ... }
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+
+ Overuse of independent_comalloc can increase overall memory usage,
+ since it cannot reuse existing noncontiguous small chunks that
+ might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+
+/*
+ pvalloc(size_t n);
+ Equivalent to valloc(minimum-page-that-holds(n)), that is,
+ round up n to nearest pagesize.
+ */
+void* dlpvalloc(size_t);
+
+/*
+ malloc_trim(size_t pad);
+
+ If possible, gives memory back to the system (via negative arguments
+ to sbrk) if there is unused memory at the `high' end of the malloc
+ pool or in unused MMAP segments. You can call this after freeing
+ large blocks of memory to potentially reduce the system-level memory
+ requirements of a program. However, it cannot guarantee to reduce
+ memory. Under some allocation patterns, some large free blocks of
+ memory will be locked between two used chunks, so they cannot be
+ given back to the system.
+
+ The `pad' argument to malloc_trim represents the amount of free
+ trailing space to leave untrimmed. If this argument is zero, only
+ the minimum amount of memory to maintain internal data structures
+ will be left. Non-zero arguments can be supplied to maintain enough
+ trailing space to service future expected allocations without having
+ to re-obtain memory from the system.
+
+ Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+int dlmalloc_trim(size_t);
+
+/*
+ malloc_usable_size(void* p);
+
+ Returns the number of bytes you can actually use in
+ an allocated chunk, which may be more than you requested (although
+ often not) due to alignment and minimum size constraints.
+ You can use this many bytes without worrying about
+ overwriting other allocated objects. This is not a particularly great
+ programming practice. malloc_usable_size can be more useful in
+ debugging and assertions, for example:
+
+ p = malloc(n);
+ assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+/*
+ malloc_stats();
+ Prints on stderr the amount of space obtained from the system (both
+ via sbrk and mmap), the maximum amount (which may be more than
+ current if malloc_trim and/or munmap got called), and the current
+ number of bytes allocated via malloc (or realloc, etc) but not yet
+ freed. Note that this is the number of bytes allocated, not the
+ number requested. It will be larger than the number requested
+ because of alignment and bookkeeping overhead. Because it includes
+ alignment wastage as being in use, this figure may be greater than
+ zero even when no user-level chunks are allocated.
+
+ The reported current and maximum system memory can be inaccurate if
+ a program makes other calls to system memory allocation functions
+ (normally sbrk) outside of malloc.
+
+ malloc_stats prints only the most commonly interesting statistics.
+ More information can be obtained by calling mallinfo.
+*/
+void dlmalloc_stats();
+
+#endif /* !ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+ mspace is an opaque type representing an independent
+ region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+ create_mspace creates and returns a new independent space with the
+ given initial capacity, or, if 0, the default granularity size. It
+ returns null if there is no system memory available to create the
+ space. If argument locked is non-zero, the space uses a separate
+ lock to control access. The capacity of the space will grow
+ dynamically as needed to service mspace_malloc requests. You can
+ control the sizes of incremental increases of this space by
+ compiling with a different DEFAULT_GRANULARITY or dynamically
+ setting with mallopt(M_GRANULARITY, value).
+*/
+mspace create_mspace(size_t capacity, int locked);
+
+/*
+ destroy_mspace destroys the given space, and attempts to return all
+ of its memory back to the system, returning the total number of
+ bytes freed. After destruction, the results of access to all memory
+ used by the space become undefined.
+*/
+size_t destroy_mspace(mspace msp);
+
+/*
+ create_mspace_with_base uses the memory supplied as the initial base
+ of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+ space is used for bookkeeping, so the capacity must be at least this
+ large. (Otherwise 0 is returned.) When this initial space is
+ exhausted, additional memory will be obtained from the system.
+ Destroying this space will deallocate all additionally allocated
+ space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+ mspace_malloc behaves as malloc, but operates within
+ the given space.
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+ mspace_free behaves as free, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_free is not actually needed.
+ free may be called instead of mspace_free because freed chunks from
+ any space are handled by their originating spaces.
+*/
+void mspace_free(mspace msp, void* mem);
+
+/*
+ mspace_realloc behaves as realloc, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_realloc is not actually
+ needed. realloc may be called instead of mspace_realloc because
+ realloced chunks from any space are handled by their originating
+ spaces.
+*/
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+ mspace_calloc behaves as calloc, but operates within
+ the given space.
+*/
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+ mspace_memalign behaves as memalign, but operates within
+ the given space.
+*/
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+ mspace_independent_calloc behaves as independent_calloc, but
+ operates within the given space.
+*/
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]);
+
+/*
+ mspace_independent_comalloc behaves as independent_comalloc, but
+ operates within the given space.
+*/
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]);
+
+/*
+ mspace_footprint() returns the number of bytes obtained from the
+ system for this space.
+*/
+size_t mspace_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+ mspace_mallinfo behaves as mallinfo, but reports properties of
+ the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+ mspace_malloc_stats behaves as malloc_stats, but reports
+ properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+ mspace_trim behaves as malloc_trim, but
+ operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+ An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+}; /* end of extern "C" */
+#endif
+
+#endif /* MALLOC_280_H */
diff --git a/viengoos/memory.c b/viengoos/memory.c
new file mode 100644
index 0000000..26ece15
--- /dev/null
+++ b/viengoos/memory.c
@@ -0,0 +1,358 @@
+/* memory.c - Basic memory management routines.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "zalloc.h"
+#include "memory.h"
+
+#include <string.h>
+
+#include <l4.h>
+#include <hurd/btree.h>
+#include <hurd/stddef.h>
+
+#ifdef _L4_TEST_ENVIRONMENT
+#include <sys/mman.h>
+#else
+#include "sigma0.h"
+#endif
+
+l4_word_t first_frame;
+l4_word_t last_frame;
+
+/* An inclusive range of bytes, [START, END].  */
+struct region
+{
+ l4_word_t start;
+ l4_word_t end;
+};
+
+/* A reserved region of memory and the reason it is reserved.
+   Descriptors are statically allocated from RESERVATION_POOL.  */
+struct reservation
+{
+ struct region region;
+ enum memory_reservation type;
+ /* If this descriptor is allocated. */
+ int used;
+
+ /* Linkage in the RESERVATIONS btree, keyed by REGION.  */
+ hurd_btree_node_t node;
+};
+
+/* Fixed pool of reservation descriptors; there is no dynamic
+   allocation this early in boot.  Size the array with the macro so
+   the two cannot drift apart.  */
+#define MAX_RESERVATIONS 128
+static struct reservation reservation_pool[MAX_RESERVATIONS];
+
+/* Btree ordering predicate for reservations.  Regions are compared
+   by position on the number line; any two regions that share at
+   least one byte compare equal.  */
+static int
+reservation_node_compare (const struct region *a,
+			  const struct region *b)
+{
+  if (a->end < b->start)
+    /* A lies entirely below B.  */
+    return -1;
+  else if (a->start > b->end)
+    /* A lies entirely above B.  */
+    return 1;
+  else
+    /* A and B overlap.  */
+    return 0;
+}
+
+/* Instantiate the hurd_btree_reservation_* operations: nodes are
+   struct reservations keyed by their REGION member and ordered by
+   reservation_node_compare (overlapping regions compare equal, so a
+   find with an overlapping key returns an existing reservation).  */
+BTREE_CLASS (reservation, struct reservation, struct region, region, node,
+ reservation_node_compare);
+
+/* The set of current reservations.  */
+static hurd_btree_reservation_t reservations;
+
+/* Reserve the region [START, END] (inclusive) with reservation TYPE.
+   An adjacent reservation of the same TYPE is coalesced with the new
+   region so that it occupies a single descriptor.  Returns true on
+   success; false if the region overlaps an existing reservation.
+   Panics if the descriptor pool is exhausted.  */
+bool
+memory_reserve (l4_word_t start, l4_word_t end,
+		enum memory_reservation type)
+{
+  assert (start < end);
+
+  struct region region = { start, end };
+
+  debug (5, "Reserving region 0x%x-0x%x (%d)", start, end, type);
+
+  /* Check for overlap.  */
+  struct reservation *overlap
+    = hurd_btree_reservation_find (&reservations, &region);
+  if (overlap)
+    {
+      debug (5, "Region 0x%x-0x%x overlaps with region 0x%x-0x%x",
+	     start, end, overlap->region.start, overlap->region.end);
+      return false;
+    }
+
+  /* See if we can coalesce.  Grow the probe region by one byte on
+     each side: anything it now overlaps must abut [START, END].  */
+  region.start --;
+  region.end ++;
+  overlap = hurd_btree_reservation_find (&reservations, &region);
+  if (overlap)
+    {
+      struct reservation *right;
+      struct reservation *left;
+
+      if (overlap->region.start == end + 1)
+	/* OVERLAP starts just after END.  */
+	{
+	  right = overlap;
+	  left = hurd_btree_reservation_prev (overlap);
+	}
+      else
+	/* OVERLAP ends just before START.  */
+	{
+	  assert (overlap->region.end + 1 == start);
+	  left = overlap;
+	  right = hurd_btree_reservation_next (overlap);
+	}
+
+      int coalesced = 0;
+
+      /* OVERLAP's neighbor may not exist: check for NULL before
+	 dereferencing LEFT or RIGHT.  */
+      if (right && right->region.start == end + 1
+	  && right->type == type)
+	/* We can coalesce with RIGHT.  */
+	{
+	  debug (5, "Coalescing with region 0x%x-0x%x",
+		 right->region.start, right->region.end);
+	  right->region.start = start;
+	  coalesced = 1;
+	}
+      else
+	right = NULL;
+
+      if (left && left->region.end + 1 == start
+	  && left->type == type)
+	/* We can coalesce with LEFT.  */
+	{
+	  debug (5, "Coalescing with region 0x%x-0x%x",
+		 left->region.start, left->region.end);
+
+	  if (right)
+	    /* We coalesce with both LEFT and RIGHT: LEFT absorbs
+	       RIGHT's extent and RIGHT's descriptor is released.  */
+	    {
+	      left->region.end = right->region.end;
+
+	      hurd_btree_reservation_detach (&reservations, right);
+	      right->used = 0;
+	    }
+	  else
+	    /* LEFT absorbs just the new region.  */
+	    left->region.end = end;
+
+	  coalesced = 1;
+	}
+
+      if (coalesced)
+	return true;
+    }
+
+  /* There are no regions with which we can coalesce.  Allocate a new
+     descriptor.  */
+  int i;
+  for (i = 0; i < MAX_RESERVATIONS; i ++)
+    if (! reservation_pool[i].used)
+      {
+	reservation_pool[i].used = 1;
+	reservation_pool[i].region.start = start;
+	reservation_pool[i].region.end = end;
+	reservation_pool[i].type = type;
+
+	struct reservation *r
+	  = hurd_btree_reservation_insert (&reservations,
+					   &reservation_pool[i]);
+	if (r)
+	  panic ("Error inserting reservation!");
+	return true;
+      }
+
+  panic ("No reservation descriptors available.");
+  return false;
+}
+
+/* Print all reserved regions at debug level 3.  */
+void
+memory_reserve_dump (void)
+{
+  debug (3, "Reserved regions:");
+
+  struct reservation *r = hurd_btree_reservation_first (&reservations);
+  for (; r; r = hurd_btree_reservation_next (r))
+    debug (3, " 0x%x-0x%x", r->region.start, r->region.end);
+}
+
+/* If any reserved memory lies within [START, END] (inclusive),
+   return true and store in *START_RESERVATION and *END_RESERVATION
+   the bounds of the lowest such overlap, clamped to [START, END].
+   Otherwise return false and leave the out parameters untouched.  */
+bool
+memory_is_reserved (l4_word_t start, l4_word_t end,
+		    l4_word_t *start_reservation,
+		    l4_word_t *end_reservation)
+{
+  assert (start < end);
+
+  struct region region = { start, end };
+
+  struct reservation *overlap = hurd_btree_reservation_find (&reservations,
+							     &region);
+  if (! overlap)
+    /* No overlap.  */
+    return false;
+
+  /* The find may return any overlapping reservation; walk backwards
+     to the first one that overlaps with REGION.  Region ends are
+     inclusive, so PREV still overlaps when its end equals START
+     (hence >=, not >).  */
+  struct reservation *prev = overlap;
+  do
+    {
+      overlap = prev;
+      prev = hurd_btree_reservation_prev (overlap);
+    }
+  while (prev && prev->region.end >= start);
+
+  debug (5, "Region 0x%x-0x%x overlaps with reserved region 0x%x-0x%x",
+	 start, end, overlap->region.start, overlap->region.end);
+
+  /* Clamp the reported overlap to [START, END].  */
+  *start_reservation
+    = overlap->region.start > start ? overlap->region.start : start;
+  *end_reservation = overlap->region.end < end ? overlap->region.end : end;
+  return true;
+}
+
+/* Add the memory starting at byte START and continuing until byte END
+ to the free pool. START must name the first byte in a page and END
+ the last. */
+static void
+memory_add (l4_word_t start, l4_word_t end)
+{
+ assert ((start & (PAGESIZE - 1)) == 0);
+ assert ((end & (PAGESIZE - 1)) == (PAGESIZE - 1));
+
+ debug (5, "Request to add physical memory 0x%x-0x%x", start, end);
+
+ l4_word_t start_reservation;
+ l4_word_t end_reservation;
+
+ /* Walk the range, releasing each maximal run of unreserved pages
+    and skipping over each reserved region.  */
+ while (start < end)
+ {
+ if (! memory_is_reserved (start, end,
+ &start_reservation, &end_reservation))
+ /* Nothing left is reserved: pretend a reservation begins just
+    past END so the whole remainder is freed below and the loop
+    terminates on the next iteration.  */
+ start_reservation = end_reservation = end + 1;
+ else
+ /* Round the start of the reservation down. */
+ /* A page that is even partially reserved is withheld
+    entirely.  */
+ {
+ start_reservation &= ~(PAGESIZE - 1);
+ debug (5, "Not adding reserved memory 0x%x-0x%x",
+ start_reservation, end_reservation);
+ }
+
+ /* Free the unreserved run [START, START_RESERVATION).  */
+ if (start_reservation - start > 0)
+ {
+ debug (5, "Adding physical memory 0x%x-0x%x",
+ start, start_reservation - 1);
+ zfree (start, start_reservation - start);
+ }
+
+ /* Set START to first page after the end of the reservation. */
+ start = (end_reservation + PAGESIZE - 1)
+ & ~(PAGESIZE - 1);
+ }
+}
+
+/* Drop every reservation of type TYPE and return the pages it
+   covered to the free pool.  */
+void
+memory_reservation_clear (enum memory_reservation type)
+{
+ struct reservation *r = hurd_btree_reservation_first (&reservations);
+ while (r)
+ {
+ /* Fetch the successor before R is detached from the tree.  */
+ struct reservation *next = hurd_btree_reservation_next (r);
+
+ if (r->type == type)
+ /* We can clear this reserved region. */
+ {
+ hurd_btree_reservation_detach (&reservations, r);
+ r->used = 0;
+
+ /* Free whole pages: the first byte of START's page through
+    the last byte of END's page.  R is already detached, and
+    memory_add re-checks the remaining reservations, so any
+    still-reserved part of these pages is withheld.  */
+ memory_add (r->region.start & ~(PAGESIZE - 1),
+ (r->region.end & ~(PAGESIZE - 1))
+ + PAGESIZE - 1);
+ }
+
+ r = next;
+ }
+}
+
+/* Acquire all available physical memory, add it to the free pool
+   (minus reserved regions), and record the frame bounds in
+   FIRST_FRAME and LAST_FRAME.  */
+void
+memory_grab (void)
+{
+ bool first = true;
+
+ /* GCC nested function: account the chunk [ADDR, ADDR+LENGTH) in
+    the FIRST_FRAME/LAST_FRAME bounds and hand it to memory_add.
+    LAST_FRAME names the first byte of the last frame, hence the
+    "- PAGESIZE".  */
+ void add (l4_word_t addr, l4_word_t length)
+ {
+ if (first || addr < first_frame)
+ first_frame = addr;
+ if (first || addr + length - PAGESIZE > last_frame)
+ last_frame = addr + length - PAGESIZE;
+ if (first)
+ first = false;
+
+ memory_add (addr, addr + length - 1);
+ }
+
+#ifdef _L4_TEST_ENVIRONMENT
+ /* Hosted test environment: fake "physical memory" with an
+    anonymous mapping plus the binary's own image.  */
+ extern char _start;
+ extern char _end;
+
+/* NOTE(review): unparenthesized macro; fine for the two uses below.  */
+#define SIZE 8 * 1024 * 1024
+ void *m = mmap (NULL, SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ /* NOTE(review): errno is checked before testing M; assumes errno
+    was 0 on entry — confirm.  */
+ assert_perror (errno);
+ if (m == MAP_FAILED)
+ panic ("No memory: %m");
+ add ((l4_word_t) m, SIZE);
+
+ /* Add binary. */
+ l4_word_t s = (l4_word_t) &_start & ~(PAGESIZE - 1);
+ l4_word_t e = ((((l4_word_t) &_end) + PAGESIZE) & ~(PAGESIZE - 1)) - 1;
+ add (s, e - s + 1);
+
+#else
+ /* Bare L4: drain sigma0 of all memory, requesting the largest
+    flexpages first.  */
+ l4_word_t s;
+ l4_fpage_t fpage;
+ /* Try with the largest fpage possible. */
+ for (s = L4_WORDSIZE - 1; s >= l4_min_page_size_log2 (); s --)
+ /* Keep getting pages of size 2^S. */
+ while (! l4_is_nil_fpage (fpage = sigma0_get_any (s)))
+ /* FPAGE is an fpage of size 2^S. Add each non-reserved base
+ frame to the free list. */
+ add (l4_address (fpage), l4_size (fpage));
+#endif
+
+#ifndef NDEBUG
+ do_debug (3)
+ zalloc_dump_zones (__func__);
+#endif
+}
+
+/* Allocate a single zeroed frame.  Returns the address of the frame,
+   or 0 if no memory is available.  */
+l4_word_t
+memory_frame_allocate (void)
+{
+  l4_word_t f = zalloc (PAGESIZE);
+  /* Don't clear through a failed allocation (assumes zalloc yields 0
+     on exhaustion — confirm against zalloc.c).  */
+  if (f)
+    memset ((void *) f, 0, PAGESIZE);
+  return f;
+}
+
+/* Return the frame at ADDR to the free pool.  ADDR must be
+   page-aligned and lie within [FIRST_FRAME, LAST_FRAME].  */
+void
+memory_frame_free (l4_word_t addr)
+{
+  /* It better be page aligned.  */
+  assert ((addr & (PAGESIZE - 1)) == 0);
+  /* It better be memory we know about.  */
+  assert (first_frame <= addr && addr <= last_frame);
+
+  zfree (addr, PAGESIZE);
+}
diff --git a/viengoos/memory.h b/viengoos/memory.h
new file mode 100644
index 0000000..a90965d
--- /dev/null
+++ b/viengoos/memory.h
@@ -0,0 +1,76 @@
+/* memory.h - Basic memory management interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_MEMORY_H
+#define RM_MEMORY_H
+
+#include <l4.h>
+
+enum memory_reservation
+ {
+ /* Our binary, never freed. */
+ memory_reservation_self = 1,
+ /* Memory used during the initialization. */
+ memory_reservation_init,
+ /* Memory used by the system executable. */
+ memory_reservation_system_executable,
+ /* Memory used by boot modules. */
+ memory_reservation_modules,
+ };
+
+/* Address of the first byte of the first frame. */
+extern l4_word_t first_frame;
+/* Address of the first byte of the last frame. */
+extern l4_word_t last_frame;
+
+/* Reserve the memory starting at byte START and ending at byte END
+ with the reservation RESERVATION. The memory is added to the free
+ pool if and when the reservation expires. Returns true on success.
+ Otherwise false, if the reservation could not be completed because
+ of an existing reservation. */
+extern bool memory_reserve (l4_word_t start, l4_word_t end,
+ enum memory_reservation reservation);
+
+/* Print the reserved regions. */
+extern void memory_reserve_dump (void);
+
+/* If there is reserved memory occurring on or after byte START and on
+ or before byte END, return true and the first byte of the first
+ contiguous region in *START_RESERVATION and the last byte in
+ *END_RESERVATION. Otherwise, false. */
+extern bool memory_is_reserved (l4_word_t start, l4_word_t end,
+ l4_word_t *start_reservation,
+ l4_word_t *end_reservation);
+
+/* Cause the reservation RESERVATION to expire. Add all memory with
+ this reservation to the free pool. */
+extern void memory_reservation_clear (enum memory_reservation reservation);
+
+/* Grab all the memory in the system. */
+extern void memory_grab (void);
+
+/* Allocate a page of memory. Returns 0 if there is no memory
+ available. */
+extern l4_word_t memory_frame_allocate (void);
+
+/* Return the frame starting at address ADDR to the free pool. */
+extern void memory_frame_free (l4_word_t addr);
+
+#endif
diff --git a/viengoos/mmap.c b/viengoos/mmap.c
new file mode 100644
index 0000000..8109e0b
--- /dev/null
+++ b/viengoos/mmap.c
@@ -0,0 +1,57 @@
+/* mmap.c - A simple mmap for anonymous memory allocations in physmem.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <hurd/stddef.h>
+#include <sys/mman.h>
+
+#include "zalloc.h"
+
+
+/* A minimal anonymous-only mmap backed by the zone allocator.
+   ADDRESS must be NULL, FLAGS must be exactly
+   MAP_PRIVATE | MAP_ANONYMOUS and PROTECT exactly
+   PROT_READ | PROT_WRITE; anything else panics.  Returns MAP_FAILED
+   ((void *) -1) if the allocation fails.  */
+void *
+mmap (void *address, size_t length, int protect, int flags,
+      int filedes, off_t offset)
+{
+  /* LENGTH is a size_t; cast for the %d directive to avoid a
+     format/argument mismatch on 64-bit targets.  */
+  debug (4, "Allocation request for %d bytes", (int) length);
+
+  if (address)
+    panic ("mmap called with non-zero ADDRESS");
+  if (flags != (MAP_PRIVATE | MAP_ANONYMOUS))
+    panic ("mmap called with invalid flags");
+  if (protect != (PROT_READ | PROT_WRITE))
+    panic ("mmap called with invalid protection");
+
+  /* At this point, we can safely ignore FILEDES and OFFSET.  */
+  void *r = ((void *) zalloc (length)) ?: (void *) -1;
+  debug (4, "=> %p", r);
+  return r;
+}
+
+
+/* Release [ADDR, ADDR + LENGTH) back to the zone allocator.  Always
+   succeeds.  NOTE(review): assumes the span was previously obtained
+   from mmap above with the same length -- zfree does not validate
+   this; confirm against zalloc's contract.  */
+int
+munmap (void *addr, size_t length)
+{
+ zfree ((l4_word_t) addr, length);
+ return 0;
+}
diff --git a/viengoos/multiboot.h b/viengoos/multiboot.h
new file mode 100644
index 0000000..59d7995
--- /dev/null
+++ b/viengoos/multiboot.h
@@ -0,0 +1,121 @@
+/* multiboot.h - the header for Multiboot */
+/* Copyright (C) 1999, 2001 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Macros. */
+
+/* The magic number for the Multiboot header. */
+#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
+
+/* The flags for the Multiboot header. */
+#ifdef __ELF__
+# define MULTIBOOT_HEADER_FLAGS 0x00000003
+#else
+# define MULTIBOOT_HEADER_FLAGS 0x00010003
+#endif
+
+/* The magic number passed by a Multiboot-compliant boot loader. */
+#define MULTIBOOT_BOOTLOADER_MAGIC 0x2BADB002
+
+#ifndef ASM
+/* Do not include here in boot.S. */
+
+/* Types. */
+
+#include <sys/types.h>
+
+/* The Multiboot header.  (Layout fixed by the Multiboot
+   specification; this file is vendored from Grub -- keep field order
+   unchanged.)  */
+typedef struct multiboot_header
+{
+ uint32_t magic;
+ uint32_t flags;
+ uint32_t checksum;
+ uint32_t header_addr;
+ uint32_t load_addr;
+ uint32_t load_end_addr;
+ uint32_t bss_end_addr;
+ uint32_t entry_addr;
+} multiboot_header_t;
+
+/* The symbol table for a.out. */
+typedef struct aout_symbol_table
+{
+ uint32_t tabsize;
+ uint32_t strsize;
+ uint32_t addr;
+ uint32_t reserved;
+} aout_symbol_table_t;
+
+/* The section header table for ELF. */
+typedef struct elf_section_header_table
+{
+ uint32_t num;
+ uint32_t size;
+ uint32_t addr;
+ uint32_t shndx;
+} elf_section_header_table_t;
+
+/* The Multiboot information.  Which fields are valid is indicated by
+   the bits of FLAGS; consult the Multiboot specification before
+   reading any optional field.  */
+typedef struct multiboot_info
+{
+ uint32_t flags;
+ uint32_t mem_lower;
+ uint32_t mem_upper;
+ uint32_t boot_device;
+ uint32_t cmdline;
+ uint32_t mods_count;
+ uint32_t mods_addr;
+ union
+ {
+ aout_symbol_table_t aout_sym;
+ elf_section_header_table_t elf_sec;
+ } u;
+ uint32_t mmap_length;
+ uint32_t mmap_addr;
+ uint32_t drives_length;
+ uint32_t drives_addr;
+ uint32_t config_table;
+ uint32_t boot_loader_name;
+ uint32_t apm_table;
+ uint32_t vbe_control_info;
+ uint32_t vbe_mode_info;
+ uint32_t vbe_mode;
+ uint32_t vbe_interface_seg;
+ uint32_t vbe_interface_off;
+ uint32_t vbe_interface_len;
+} multiboot_info_t;
+
+/* The module structure.  MOD_START/MOD_END delimit the module's
+   physical load address range; STRING is the physical address of its
+   command line.  */
+typedef struct module
+{
+ uint32_t mod_start;
+ uint32_t mod_end;
+ uint32_t string;
+ uint32_t reserved;
+} module_t;
+
+/* The memory map. SIZE is the size of the structure except for the
+ SIZE field itself. */
+typedef struct memory_map
+{
+ uint32_t size;
+ uint64_t base_addr;
+ uint64_t length;
+ uint32_t type;
+} memory_map_t;
+
+#endif /* ! ASM */
diff --git a/viengoos/object.c b/viengoos/object.c
new file mode 100644
index 0000000..c3b82f1
--- /dev/null
+++ b/viengoos/object.c
@@ -0,0 +1,443 @@
+/* object.c - Object store management.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <l4.h>
+#include <stddef.h>
+#include <string.h>
+#include <hurd/stddef.h>
+#include <hurd/ihash.h>
+#include <bit-array.h>
+
+#include "object.h"
+#include "activity.h"
+
+
+struct object_desc *object_descs;
+
+/* XXX: The number of in memory folios. (Recall: one folio => 512kb
+ storage.) */
+#define FOLIOS_CORE 256
+static unsigned char folios[FOLIOS_CORE / 8];
+
+/* Given an OID, we need a way to find 1) whether the object is
+ memory, and 2) if so, where. We achieve this using a hash. The
+ hash maps object OIDs to union object *s. */
+/* XXX: Although the current implementation of the hash function
+ dynamically allocates memory according to demand, the maximum
+ amount of required memory can be calculated at startup. */
+/* XXX: A hash is key'd by a machine word, however, an oid is
+ 64-bits. */
+/* XXX: When dereferencing a capability slot, we look up the object
+ using the hash and then check that the version number stored in the
+ capability slot matchs that in the object. This likely incurs a
+ cache-line miss to read the version from the object descriptor. We
+ can elide this by hashing from the concatenation of the OID and the
+ version number but see the last point for why this is
+ problematic. */
+static struct hurd_ihash objects;
+
+/* The object OBJECT was just brought into memory. Set it up. */
+static void
+memory_object_setup (struct object *object)
+{
+ struct object_desc *odesc = object_to_object_desc (object);
+
+ debug (5, "Setting up 0x%llx (object %d)", odesc->oid,
+ ((uintptr_t) odesc - (uintptr_t) object_descs)
+ / sizeof (*odesc));
+
+ /* Register the descriptor in the OID -> descriptor hash so
+ object_find_soft can locate it. Idempotent: re-inserting the
+ same descriptor is harmless. */
+ bool had_value;
+ hurd_ihash_value_t old_value;
+ error_t err = hurd_ihash_replace (&objects, odesc->oid, odesc,
+ &had_value, &old_value);
+ assert (err == 0);
+ /* If there was an old value, it better have the same value as what
+ we just added. */
+ assert (! had_value || old_value == odesc);
+}
+
+/* Release the object OBJECT: revoke extant mappings, remove its
+   descriptor from the object hash and return the frame to the free
+   pool.  */
+static void
+memory_object_destroy (struct activity *activity, struct object *object)
+{
+  struct object_desc *odesc = object_to_object_desc (object);
+
+  debug (5, "Destroy 0x%llx (object %d)", odesc->oid,
+         ((uintptr_t) odesc - (uintptr_t) object_descs)
+         / sizeof (*odesc));
+
+  /* Revoke any capabilities mapping the object.  */
+  struct cap cap = object_desc_to_cap (odesc);
+  cap_shootdown (activity, &cap);
+
+  hurd_ihash_locp_remove (&objects, odesc->locp);
+  assert (! hurd_ihash_find (&objects, odesc->oid));
+
+  /* XXX: Remove from linked lists! */
+
+#ifndef NDEBUG
+  /* Poison the stale descriptor in debug builds so use-after-free is
+     caught by the 0xde pattern.  (The original used #ifdef NDEBUG,
+     which poisoned only in release builds -- inverted.)  */
+  memset (odesc, 0xde, sizeof (struct object_desc));
+#endif
+
+  /* Return the frame to the free pool.  */
+  memory_frame_free ((l4_word_t) object);
+}
+
+/* Initialize the object sub-system.  Must be called after
+   memory_grab so that FIRST_FRAME and LAST_FRAME are valid.  */
+void
+object_init (void)
+{
+  assert (sizeof (struct folio) <= PAGESIZE);
+
+  /* Use offsetof rather than dereferencing a null pointer to compute
+     the field offset: the null-pointer idiom is undefined behavior.  */
+  hurd_ihash_init (&objects, offsetof (struct object_desc, locp));
+
+  /* Allocate enough object descriptors for the number of pages
+     (calloc takes count first, then element size, and
+     zero-initializes the array).  */
+  object_descs = calloc ((last_frame - first_frame) / PAGESIZE + 1,
+                         sizeof (struct object_desc));
+  if (! object_descs)
+    panic ("Failed to allocate object descriptor array!\n");
+}
+
+/* If the object with id OID is resident, return its in-memory
+   address; otherwise return NULL.  Never touches backing store.  */
+struct object *
+object_find_soft (struct activity *activity, oid_t oid)
+{
+  struct object_desc *desc = hurd_ihash_find (&objects, oid);
+
+  if (! desc)
+    return NULL;
+
+  if (oid != desc->oid)
+    debug (1, "oid (%llx) != desc oid (%llx)", oid, desc->oid);
+  assert (oid == desc->oid);
+
+  return object_desc_to_object (desc);
+}
+
+
+/* Return the in-memory object with id OID, materializing it if
+ necessary. Returns NULL on failure (out of memory, or the object
+ lives only on backing store, which is not yet implemented). */
+struct object *
+object_find (struct activity *activity, oid_t oid)
+{
+ struct object *obj = object_find_soft (activity, oid);
+ if (obj)
+ return obj;
+
+ struct folio *folio;
+
+ /* OIDs are laid out folio header first, then its FOLIO_OBJECTS
+ objects; PAGE == -1 means OID names the folio itself. */
+ int page = (oid % (FOLIO_OBJECTS + 1)) - 1;
+ if (page == -1)
+ /* The object to find is a folio. */
+ {
+ if (oid / (FOLIO_OBJECTS + 1) < FOLIOS_CORE)
+ /* It's an in-core folio. */
+ {
+ assert (bit_test (folios, oid / (FOLIO_OBJECTS + 1)));
+
+ obj = (struct object *) memory_frame_allocate ();
+ folio = (struct folio *) obj;
+ if (! folio)
+ {
+ /* XXX: Out of memory. Do some garbage collection. */
+ return NULL;
+ }
+
+ goto setup_desc;
+ }
+
+ /* It's not an in-memory folio. We read it from disk below. */
+ }
+ else
+ {
+ /* Find the folio corresponding to the object. Note: recursive
+ call with the folio's own oid (OID - PAGE - 1). */
+ folio = (struct folio *) object_find (activity, oid - page - 1);
+ assert (folio);
+
+ if (! folio->objects[page].content)
+ /* The object is a zero page. No need to read anything from
+ backing store: just allocate a page and zero it. */
+ {
+ obj = (struct object *) memory_frame_allocate ();
+ if (! obj)
+ {
+ /* XXX: Out of memory. Do some garbage collection. */
+ return NULL;
+ }
+
+ goto setup_desc;
+ }
+ }
+
+ /* Read the object from backing store. */
+
+ /* XXX: Do it. */
+ return NULL;
+
+ setup_desc:;
+ /* OBJ points to the in-memory copy of the object. Set up its
+ corresponding descriptor. */
+ struct object_desc *odesc = object_to_object_desc (obj);
+
+ if (page == -1)
+ /* It's a folio. */
+ {
+ odesc->type = cap_folio;
+ odesc->version = folio->folio_version;
+ }
+ else
+ {
+ odesc->type = folio->objects[page].type;
+ odesc->version = folio->objects[page].version;
+ }
+ odesc->oid = oid;
+ /* Enter the descriptor into the object hash. */
+ memory_object_setup (obj);
+
+ return obj;
+}
+
+/* Assign FOLIO to NEW_PARENT and push it onto the front of
+ NEW_PARENT's cap-linked list of folios. PRINCIPAL is the activity
+ used for the capability lookups. Currently only the initial
+ "assign to parent" case is implemented -- see XXX below. */
+void
+folio_reparent (struct activity *principal, struct folio *folio,
+ struct activity *new_parent)
+{
+ /* XXX: Implement this for the real "reparent" case (and not just
+ the parent case). */
+
+ /* Record the owner. */
+ struct object_desc *pdesc
+ = object_to_object_desc ((struct object *) new_parent);
+ assert (pdesc->type == cap_activity);
+ folio->activity.oid = pdesc->oid;
+ folio->activity.version = pdesc->version;
+ folio->activity.type = cap_activity;
+
+ /* Add FOLIO to ACTIVITY's list of allocated folios. */
+
+ /* Set FOLIO->NEXT to the current head. */
+ folio->next = new_parent->folios;
+ folio->prev.type = cap_void;
+
+ oid_t foid = object_to_object_desc ((struct object *) folio)->oid;
+
+ struct object *head = cap_to_object (principal, &new_parent->folios);
+ if (head)
+ /* Update the old head's previous pointer to point to FOLIO. */
+ {
+ struct object_desc *odesc = object_to_object_desc (head);
+ assert (odesc->type == cap_folio);
+
+ struct folio *h = (struct folio *) head;
+
+ /* The old head must have been the list head: no previous. */
+ struct object *head_prev = cap_to_object (principal, &h->prev);
+ assert (! head_prev);
+
+ h->prev.oid = foid;
+ h->prev.type = cap_folio;
+ h->prev.version = folio->folio_version;
+ }
+
+ /* Make FOLIO the head. */
+ new_parent->folios.oid = foid;
+ new_parent->folios.type = cap_folio;
+ new_parent->folios.version = folio->folio_version;
+}
+
+/* Allocate a folio on behalf of ACTIVITY.  Returns NULL if
+   ACTIVITY's storage quota would be exceeded.  A NULL ACTIVITY is
+   permitted exactly once, for the boot-time allocation.  */
+struct folio *
+folio_alloc (struct activity *activity)
+{
+  if (activity)
+    {
+      /* Check that the activity does not exceed its quota (a quota
+         of 0 means unlimited).  */
+      /* XXX: Charge not only the activity but also its ancestors.  */
+      if (activity->storage_quota
+          && activity->folio_count >= activity->storage_quota)
+        /* Quota exhausted: fail the allocation.  (The original
+           merely skipped the increment and allocated anyway, leaving
+           the storage unaccounted.)  */
+        return NULL;
+
+      /* Charge the activity.  */
+      activity->folio_count ++;
+    }
+#ifndef NDEBUG
+  else
+    {
+      static int once;
+      assert (! once);
+      once = 1;
+    }
+#endif
+
+  /* XXX: We only do in-memory folios right now.  */
+  int f = bit_alloc (folios, sizeof (folios), 0);
+  if (f < 0)
+    panic ("Out of folios");
+  oid_t foid = f * (FOLIO_OBJECTS + 1);
+
+  /* We can't just allocate a fresh page as we need to preserve the
+     version information for the folio as well as the objects.  */
+  struct folio *folio = (struct folio *) object_find (activity, foid);
+
+  if (activity)
+    folio_reparent (activity, folio, activity);
+
+  return folio;
+}
+
+/* Destroy the folio FOLIO: free each of its objects, unlink it from
+ its owning activity's folio list and release its slot. The
+ version bump at the end invalidates any extant capabilities that
+ designate the folio. */
+void
+folio_free (struct activity *activity, struct folio *folio)
+{
+ /* NB: The activity freeing FOLIO may not be the one who paid for
+ the storage for it. Nevertheless, the paging activity, etc., is
+ paid for by the caller. */
+
+ struct object_desc *fdesc = object_to_object_desc ((struct object *) folio);
+ assert (fdesc->type == cap_folio);
+ assert (fdesc->oid % (FOLIO_OBJECTS + 1) == 0);
+
+ /* Free the objects. This bumps the version of any live objects.
+ This is correct as although the folio is being destroyed, when we
+ lookup an object via a capability, we only check that the
+ capability's version matches the object's version (we do not
+ check whether the folio is valid). */
+ /* As we free the objects, we also don't have to call cap_shootdown
+ here. */
+ int i;
+ for (i = 0; i < FOLIO_OBJECTS; i ++)
+ folio_object_free (activity, folio, i);
+
+ /* Update the allocation information. Namely, remove folio from the
+ activity's linked list. */
+ struct activity *storage_activity
+ = (struct activity *) cap_to_object (activity, &folio->activity);
+ assert (storage_activity);
+
+ struct folio *next = (struct folio *) cap_to_object (activity, &folio->next);
+ struct folio *prev = (struct folio *) cap_to_object (activity, &folio->prev);
+
+ if (prev)
+ prev->next = folio->next;
+ else
+ /* If there is no previous pointer, then FOLIO is the start of the
+ list and we need to update the head. */
+ storage_activity->folios = folio->next;
+
+ if (next)
+ next->prev = folio->prev;
+
+ /* XXX: Update accounting data. NOTE(review): folio_count is not
+ decremented here although folio_alloc increments it -- confirm
+ this is what the XXX refers to. */
+
+
+ /* And free the folio. */
+ bit_dealloc (folios, fdesc->oid / (FOLIO_OBJECTS + 1));
+
+ /* Invalidate all capabilities naming the folio. */
+ fdesc->version ++;
+}
+
+/* Allocate an object of type TYPE in slot IDX of FOLIO, implicitly
+ destroying any object currently stored there. TYPE == cap_void
+ frees the slot. If OBJECTP is non-NULL, the in-memory object is
+ returned in *OBJECTP (TYPE must not be cap_void in that case). */
+void
+folio_object_alloc (struct activity *activity,
+ struct folio *folio,
+ int idx,
+ enum cap_type type,
+ struct object **objectp)
+{
+ debug (4, "allocating %s at %d", cap_type_string (type), idx);
+
+ assert (0 <= idx && idx < FOLIO_OBJECTS);
+
+ struct object_desc *fdesc = object_to_object_desc ((struct object *) folio);
+ assert (fdesc->type == cap_folio);
+ assert (fdesc->oid % (1 + FOLIO_OBJECTS) == 0);
+
+ /* The slot's oid follows the folio header in the oid space. */
+ oid_t oid = fdesc->oid + 1 + idx;
+
+ struct object *object = NULL;
+
+ /* Deallocate any existing object. */
+
+ if (folio->objects[idx].type == cap_activity
+ || folio->objects[idx].type == cap_thread)
+ /* These object types have state that needs to be explicitly
+ destroyed. */
+ {
+ /* Bring the object into memory so it can be torn down. */
+ object = object_find (activity, oid);
+
+ /* See if we need to destroy the object. */
+ switch (folio->objects[idx].type)
+ {
+ case cap_activity:
+ activity_destroy (activity, NULL, (struct activity *) object);
+ break;
+ case cap_thread:
+ thread_destroy (activity, (struct thread *) object);
+ break;
+ default:
+ assert (!"Object desc type does not match folio type.");
+ break;
+ }
+ }
+
+ if (! object)
+ object = object_find_soft (activity, oid);
+ if (object)
+ /* The object is in memory. Update its descriptor and revoke any
+ references to the old object. */
+ {
+ struct object_desc *odesc = object_to_object_desc (object);
+ assert (odesc->oid == oid);
+ assert (odesc->type == folio->objects[idx].type);
+
+ if (type == cap_void)
+ /* We are deallocating the object: free associated memory. */
+ {
+ memory_object_destroy (activity, object);
+ object = NULL;
+ }
+ else
+ {
+ /* Revoke all mappings of the old object. */
+ struct cap cap = object_desc_to_cap (odesc);
+ cap_shootdown (activity, &cap);
+ }
+
+ odesc->type = type;
+ odesc->version = folio->objects[idx].version;
+
+ }
+
+ if (folio->objects[idx].type != cap_void)
+ /* We know that if an object's type is void then there are no
+ extant pointers to it. If there are only pointers in memory,
+ then we need to bump the memory version. Otherwise, we need to
+ bump the disk version. */
+ {
+ /* XXX: Check if we can just bump the in-memory version. */
+
+ /* Bump the disk version. */
+ folio->objects[idx].version ++;
+ }
+
+ /* Set the object's new type. */
+ folio->objects[idx].type = type;
+ /* Mark it as being empty. */
+ folio->objects[idx].content = 0;
+
+ if (objectp)
+ /* Caller wants to use the object. */
+ {
+ assert (type != cap_void);
+
+ if (! object)
+ object = object_find (activity, oid);
+ *objectp = object;
+ }
+}
diff --git a/viengoos/object.h b/viengoos/object.h
new file mode 100644
index 0000000..3e2b985
--- /dev/null
+++ b/viengoos/object.h
@@ -0,0 +1,270 @@
+/* object.h - Object store interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_OBJECT_H
+#define RM_OBJECT_H
+
+#include <l4.h>
+#include <error.h>
+#include <string.h>
+#include <assert.h>
+#include <hurd/cap.h>
+#include <hurd/folio.h>
+#include <stdint.h>
+
+#include "memory.h"
+#include "cap.h"
+#include "activity.h"
+#include "thread.h"
+
+/* Objects
+ -------
+
+ A folio is a unit of disk storage. Objects are allocated out of a
+ folio. Each folio consists of exactly FOLIO_OBJECTS objects each
+ PAGESIZE bytes in size. A folio also includes a 4 kb header (Thus
+ a folio consists of a total of FOLIO_OBJECTS + 1 pages of storage).
+ The header also describes the folio:
+
+ version
+ the activity to which the folio is allocated
+
+ It also describes each object, including:
+
+ the object's type,
+ whether it contains content,
+ whether it is discardable,
+ its version, and
+ its checksum
+
+ Object Versioning
+ -----------------
+
+ When an object (or a folio) is allocated we guarantee that the only
+ reference to the object is the newly created one. We achieve this
+ using object versioning. Each capability includes a version in
+ addition to the OID. For the capability to be valid, its version
+ must match the object's version. Further, the following invariants
+ must hold:
+
+ A capability is valid if its version matches the designated
+ object's version, and
+
+ When an object is allocated, it is given a version that no
+ capability that designates the object has.
+
+ The implementation ensures these invariants. When a storage device
+ is initialized, all objects are set to have a version of 0 and a
+ type of cap_void. As all objects are new, there can be no
+ capabilities designating them. When an object is deallocated, if
+ the object's type is void, nothing is done. Otherwise, the
+ object's version is incremented and its type is set to void. When
+ an object is allocated, if the object's type is not void, the
+ object is deallocated. A reference is then generated using the
+ object's version. It is not possible to allocate an object of type
+ void.
+
+ So long as the version space is infinite, this suffices. However,
+ as we only use a dedicated number of version bits, we must
+ occasionally find a version number that satisfies the above
+ invariants. Without further help, this requires scanning storage.
+ To avoid this, when an object is initially allocated, we keep track
+ of all in-memory capabilities that reference an object (TODO). If
+ the object is deallocated and no capabilities have been written to
+ disk, then we just invalidate the in-memory capabilities, and there
+ is no need to increment the on-disk version. We know this since
+ when we allocated the object, there were no capabilities that had the
+ object's version and as we kept track of and invalidated all of the
+ capabilities that referenced the object and had that version.
+
+ If it happens that an object's version does overflow, then the
+ object cannot be immediately reused. We must scan storage and find
+ all capabilities that reference this object and invalidate them (by
+ changing their type to void). To avoid this, we can relocate the
+ entire folio. All of the other objects in the folio are replaced
+ by forwarders which transparently forward to the new folio (and
+ rewrite indirected capabilities).
+
+
+ When dereferencing a capability, the version of the folio is not
+ checked. When a folio is deallocated, we need to guarantee that any
+ capabilities referencing not only the folio but also the objects
+ contained in the folio are invalidated. To achieve this, we
+ implicitly deallocate each object contained in the folio according
+ to the above rules. */
+
+
+/* An object descriptor. There is one for each in-memory object. */
+struct object_desc
+{
+ /* Every in-memory object lives in a hash hashed on its OID.
+ LOCP is the hash-location cookie used by
+ hurd_ihash_locp_remove. */
+ void *locp;
+
+ /* The version and OID of the object. (Bit-fields packed into a
+ single word.) */
+ l4_word_t version : CAP_VERSION_BITS;
+ l4_word_t type : CAP_TYPE_BITS;
+
+ oid_t oid;
+
+ /* Each activity contains a list of the in-memory objects it is
+ currently allocated. NOTE(review): these lists are not yet
+ maintained -- see the "Remove from linked lists" XXX in
+ object.c. */
+ struct
+ {
+ struct object_desc *next;
+ struct object_desc **prevp;
+ } activity;
+
+ /* Each allocated object is attached to either the global clean or
+ the global dirty list. */
+ struct
+ {
+ struct object_desc *next;
+ struct object_desc **prevp;
+ } glru;
+
+ /* Each allocated object is also kept on either its activity's clean
+ or its activity's dirty list. */
+ struct
+ {
+ struct object_desc *next;
+ struct object_desc **prevp;
+ } alru;
+};
+
+/* We keep an array of object descriptors. There is a linear mapping
+ between object desciptors and physical memory addresses. XXX: This
+ is cheap but problematic if there are large holes in the physical
+ memory map. */
+extern struct object_desc *object_descs;
+
+/* The global LRU lists. Every allocated frame is on one of these
+ two. */
+extern struct object_desc *dirty;
+extern struct object_desc *clean;
+
+/* Initialize the object sub-system. Must be called after grabbing
+ all of the memory. */
+extern void object_init (void);
+
+/* Return the address of the object corresponding to object OID,
+ reading it from backing store if required. */
+extern struct object *object_find (struct activity *activity, oid_t oid);
+
+/* If the object corresponding to object OID is in-memory, return it.
+ Otherwise, return NULL. Does not go to disk. */
+extern struct object *object_find_soft (struct activity *activity,
+ oid_t oid);
+
+/* Return the object corresponding to the object descriptor DESC.
+ Descriptors and frames correspond linearly: descriptor i describes
+ the frame at FIRST_FRAME + i * PAGESIZE. This is the inverse of
+ object_to_object_desc below. */
+#define object_desc_to_object(desc_) \
+ ({ \
+ struct object_desc *desc__ = (desc_); \
+ /* There is only one legal area for descriptors. */ \
+ assert ((uintptr_t) object_descs <= (uintptr_t) (desc__)); \
+ assert ((uintptr_t) (desc__) \
+ <= (uintptr_t) &object_descs[(last_frame - first_frame) \
+ / PAGESIZE]); \
+ \
+ (struct object *) (first_frame \
+ + (((uintptr_t) (desc__) - (uintptr_t) object_descs) \
+ / sizeof (struct object_desc)) * PAGESIZE); \
+ })
+
+/* Return the object descriptor corresponding to the object
+ OBJECT. */
+#define object_to_object_desc(object_) \
+ ({ \
+ struct object *object__ = (object_); \
+ /* Objects better be on a page boundary. */ \
+ assert (((uintptr_t) (object__) & (PAGESIZE - 1)) == 0); \
+ /* And they better be in memory. */ \
+ assert (first_frame <= (uintptr_t) (object__)); \
+ assert ((uintptr_t) (object__) <= last_frame); \
+ \
+ &object_descs[((uintptr_t) (object__) - first_frame) / PAGESIZE]; \
+ })
+
+/* Construct a capability designating the object described by DESC.
+   The returned cap carries no address translation.  */
+static inline struct cap
+object_desc_to_cap (struct object_desc *desc)
+{
+  struct cap c;
+
+  c.version = desc->version;
+  c.type = desc->type;
+  c.oid = desc->oid;
+  c.addr_trans = CAP_ADDR_TRANS_VOID;
+
+  return c;
+}
+
+/* Construct a capability designating OBJECT.  */
+static inline struct cap
+object_to_cap (struct object *object)
+{
+  struct object_desc *desc = object_to_object_desc (object);
+  return object_desc_to_cap (desc);
+}
+
+/* Allocate a folio to activity ACTIVITY. Returns NULL if not
+ possible. Otherwise a pointer to the in-memory folio. */
+extern struct folio *folio_alloc (struct activity *activity);
+
+/* Reassign the storage designated by FOLIO to the principal
+ NEW_PARENT. */
+extern void folio_reparent (struct activity *principal, struct folio *folio,
+ struct activity *new_parent);
+
+/* Destroy the folio FOLIO. */
+extern void folio_free (struct activity *activity, struct folio *folio);
+
+/* Allocate an object of type TYPE using the PAGE page from the folio
+ FOLIO. This implicitly destroys any existing object in that page.
+ If TYPE is cap_void, this is equivalent to calling
+ folio_object_free. If OBJECTP is not-NULL, then the in-memory
+ location of the object is returned in *OBJECTP. */
+extern void folio_object_alloc (struct activity *activity,
+ struct folio *folio, int page,
+ enum cap_type type,
+ struct object **objectp);
+
+/* Deallocate the object stored in page PAGE of folio FOLIO. */
+static inline void
+folio_object_free (struct activity *activity,
+ struct folio *folio, int page)
+{
+ /* Allocating cap_void over the slot is the canonical way to free
+ it: folio_object_alloc bumps the version and voids the slot. */
+ folio_object_alloc (activity, folio, page, cap_void, NULL);
+}
+
+/* Deallocate the object OBJECT.  OBJECT must not itself be a folio
+   -- use folio_free for that.  */
+static inline void
+object_free (struct activity *activity, struct object *object)
+{
+  struct object_desc *odesc = object_to_object_desc (object);
+
+  int page = odesc->oid % (1 + FOLIO_OBJECTS) - 1;
+  /* A folio's oid is a multiple of 1 + FOLIO_OBJECTS, which yields
+     PAGE == -1 here -- an index folio_object_free cannot handle.
+     Catch the misuse explicitly.  */
+  assert (page >= 0);
+
+  oid_t foid = odesc->oid - page - 1;
+
+  struct folio *folio = (struct folio *) object_find (activity, foid);
+  assert (folio);
+
+  folio_object_free (activity, folio, page);
+}
+
+#endif
diff --git a/viengoos/output-none.c b/viengoos/output-none.c
new file mode 100644
index 0000000..d5ca80f
--- /dev/null
+++ b/viengoos/output-none.c
@@ -0,0 +1,33 @@
+/* output-none.c - A dummy output driver.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "output.h"
+
+/* A driver whose hooks are all NULL: output sent through it is
+ silently discarded. */
+struct output_driver no_output =
+ {
+ "none",
+ 0, /* init */
+ 0, /* deinit */
+ 0 /* putchar */
+ };
diff --git a/viengoos/output-serial.c b/viengoos/output-serial.c
new file mode 100644
index 0000000..a199407
--- /dev/null
+++ b/viengoos/output-serial.c
@@ -0,0 +1,160 @@
+/* output-serial.c - A serial port output driver.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Daniel Wagner.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <sys/io.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include <l4/ipc.h>
+
+#include "output.h"
+
+
+/* The base I/O ports for the serial device ports COM1 and COM2. */
+#define UART1_BASE 0x3f8
+#define UART2_BASE 0x2f8
+
+/* The selected base port. */
+static unsigned short int uart_base = UART1_BASE;
+
+/* The data register. */
+#define UART_DR (uart_base + 0)
+
+/* The interrupt enable and ID registers. */
+#define UART_IER (uart_base + 1)
+#define UART_IIR (uart_base + 2)
+
+/* The line and modem control and status registers. */
+#define UART_LCR (uart_base + 3)
+#define UART_MCR (uart_base + 4)
+#define UART_LSR (uart_base + 5)
+#define UART_LSR_THRE (1 << 5)
+#define UART_MSR (uart_base + 6)
+
+
+/* Baudrate divisor LSB and MSB registers. */
+#define UART_LSB (uart_base + 0)
+#define UART_MSB (uart_base + 1)
+
+/* The default speed setting. */
+#define UART_SPEED_MAX 115200
+#define UART_SPEED_MIN 50
+#define UART_SPEED_DEFAULT UART_SPEED_MAX
+
+
+/* Initialize the serial port.  DRIVER_CFG is a comma separated list
+   of options: "uart1" or "uart2" selects the base port, and
+   "speed=N" (with an optional ".5" suffix, e.g. "speed=134.5")
+   selects the baud rate.  Invalid options are silently ignored.  */
+static void
+serial_init (const char *driver_cfg)
+{
+  static const char delimiters[] = ",";
+  /* Twice the desired UART speed, to allow for .5 values. */
+  unsigned int uart_speed = 2 * UART_SPEED_DEFAULT;
+  unsigned int divider;
+
+  if (driver_cfg)
+    {
+      char *cfg = strdupa (driver_cfg);
+      char *token = strtok (cfg, delimiters);
+
+      while (token)
+	{
+	  if (!strcmp (token, "uart1"))
+	    uart_base = UART1_BASE;
+	  if (!strcmp (token, "uart2"))
+	    uart_base = UART2_BASE;
+	  if (!strncmp (token, "speed=", 6))
+	    {
+	      char *tail;
+	      unsigned long new_speed;
+
+	      errno = 0;
+	      new_speed = strtoul (&token[6], &tail, 0);
+
+	      /* Allow .5 for speeds like 134.5.  NEW_SPEED is kept
+		 doubled, like UART_SPEED.  */
+	      new_speed <<= 1;
+	      if (tail[0] == '.' && tail[1] == '5')
+		{
+		  new_speed++;
+		  tail += 2;
+		}
+	      /* NEW_SPEED is doubled, so compare against the doubled
+		 bounds; the comparisons are inclusive so that the
+		 extreme rates (50 and 115200 baud) are accepted.  */
+	      if (!errno && !*tail
+		  && new_speed >= 2 * UART_SPEED_MIN
+		  && new_speed <= 2 * UART_SPEED_MAX)
+		uart_speed = new_speed;
+	    }
+	  token = strtok (NULL, delimiters);
+	}
+    }
+
+  /* Set the DLAB bit to make the divisor latch registers
+     accessible.  */
+  outb (0x80, UART_LCR);
+
+  /* FIXME: How long do we have to wait? */
+  l4_sleep (l4_time_period (L4_WORD_C (100000)));
+
+  /* Set baud rate.  Both values are doubled, so the factor of two
+     cancels in the division.  */
+  divider = (2 * UART_SPEED_MAX) / uart_speed;
+  outb ((divider >> 0) & 0xff, UART_LSB);
+  outb ((divider >> 8) & 0xff, UART_MSB);
+
+  /* Set 8,N,1 (this also clears DLAB again). */
+  outb (0x03, UART_LCR);
+
+  /* Disable interrupts. */
+  outb (0x00, UART_IER);
+
+  /* Enable FIFOs. */
+  outb (0x07, UART_IIR);
+
+  /* Enable RX interrupts. */
+  outb (0x01, UART_IER);
+
+  /* Drain any stale status so the UART starts in a clean state.  */
+  inb (UART_IER);
+  inb (UART_IIR);
+  inb (UART_LCR);
+  inb (UART_MCR);
+  inb (UART_LSR);
+  inb (UART_MSR);
+}
+
+
+/* Transmit CHR on the serial port; a '\n' is followed by a '\r'.  */
+static void
+serial_putchar (int chr)
+{
+  int pending = chr;
+
+  for (;;)
+    {
+      /* Busy-wait until the transmitter holding register is empty.  */
+      while ((inb (UART_LSR) & UART_LSR_THRE) == 0)
+	continue;
+
+      outb (pending, UART_DR);
+
+      if (pending != '\n')
+	break;
+      /* Follow a newline with a carriage return.  */
+      pending = '\r';
+    }
+}
+
+
+struct output_driver serial_output =
+ {
+ "serial",
+ serial_init,
+ 0, /* deinit */
+ serial_putchar
+ };
diff --git a/viengoos/output-stdio.c b/viengoos/output-stdio.c
new file mode 100644
index 0000000..4b5dc6d
--- /dev/null
+++ b/viengoos/output-stdio.c
@@ -0,0 +1,51 @@
+/* output-stdio.c - A unix stdio output driver.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "output.h"
+
+#include <stdio.h>
+#include <unistd.h>
+
+/* Emit CHR on the process's standard output (file descriptor 1).  */
+static void
+stdio_putchar (int chr)
+{
+  char buf = chr;
+
+  write (1, &buf, 1);
+}
+
+
+struct output_driver stdio_output =
+ {
+ "stdio",
+ 0, /* init */
+ 0, /* deinit */
+ stdio_putchar
+ };
+
+/* A list of all output drivers, terminated with a null pointer. */
+struct output_driver *output_drivers[] =
+ {
+ &stdio_output,
+ 0
+ };
diff --git a/viengoos/output-vga.c b/viengoos/output-vga.c
new file mode 100644
index 0000000..5746fdb
--- /dev/null
+++ b/viengoos/output-vga.c
@@ -0,0 +1,147 @@
+/* output-vga.c - A VGA output driver.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <sys/io.h>
+
+#include "output.h"
+
+#define VGA_VIDEO_MEM_BASE_ADDR 0x0B8000
+#define VGA_VIDEO_MEM_LENGTH 0x004000
+
+/* The default attribute is light grey on black. */
+#define VGA_DEF_ATTRIBUTE 7
+
+#define VGA_COLUMNS 80
+#define VGA_ROWS 25
+
+/* The CRTC Registers. XXX Depends on the I/O Address Select field.
+ However, the only reason to use the other values is compatibility
+ with monochrome adapters. */
+#define VGA_CRT_ADDR_REG 0x3d4
+#define VGA_CRT_DATA_REG 0x3d5
+
+/* The cursor position subregisters. */
+#define VGA_CRT_CURSOR_HIGH 0x0e
+#define VGA_CRT_CURSOR_LOW 0x0f
+
+
+/* Set the cursor position to POS, which is (x_pos + y_pos * width). */
+static void
+vga_set_cursor_pos (unsigned int pos)
+{
+  /* Program the two cursor-location bytes through the CRTC
+     index/data register pair: high byte, then low byte.  */
+  outb (VGA_CRT_CURSOR_HIGH, VGA_CRT_ADDR_REG);
+  outb ((pos >> 8) & 0xff, VGA_CRT_DATA_REG);
+  outb (VGA_CRT_CURSOR_LOW, VGA_CRT_ADDR_REG);
+  outb (pos & 0xff, VGA_CRT_DATA_REG);
+}
+
+
+/* Get the cursor position, which is (x_pos + y_pos * width). */
+static unsigned int
+vga_get_cursor_pos (void)
+{
+  unsigned int high;
+  unsigned int low;
+
+  /* Read the two cursor-location bytes through the CRTC index/data
+     register pair.  */
+  outb (VGA_CRT_CURSOR_HIGH, VGA_CRT_ADDR_REG);
+  high = inb (VGA_CRT_DATA_REG);
+  outb (VGA_CRT_CURSOR_LOW, VGA_CRT_ADDR_REG);
+  low = inb (VGA_CRT_DATA_REG) & 0xff;
+
+  return (high << 8) | low;
+}
+
+
+/* Global variables. */
+
+static int col;
+static int row;
+static char *video;
+
+
+static void
+vga_init (const char *cfg)
+{
+  /* CFG is unused: the VGA driver takes no configuration options.  */
+
+  /* Resume output where the firmware/bootloader left the cursor.  */
+ unsigned int pos = vga_get_cursor_pos ();
+ col = pos % VGA_COLUMNS;
+ row = pos / VGA_COLUMNS;
+
+ /* FIXME: We are faulting in the video memory here. We must have a
+ way to give it back to the system eventually, for example to the
+ physical memory server. */
+ video = (char *) VGA_VIDEO_MEM_BASE_ADDR;
+}
+
+
+/* Display CHR at the cursor position, advancing the cursor; wraps at
+   the end of a line and scrolls the screen up one row when the
+   bottom is reached.  Each screen cell is two bytes: the character
+   followed by its attribute byte.  */
+static void
+vga_putchar (int chr)
+{
+ unsigned int pos;
+
+ if (chr == '\n')
+ {
+ col = 0;
+ row++;
+ }
+ else
+ {
+ pos = row * VGA_COLUMNS + col;
+ video[2 * pos] = chr & 0xff;
+ video[2 * pos + 1] = VGA_DEF_ATTRIBUTE;
+ col++;
+ if (col == VGA_COLUMNS)
+ {
+ col = 0;
+ row++;
+ }
+ }
+
+  /* The cursor fell off the bottom: copy every row up by one and
+     blank the freed last row.  */
+ if (row == VGA_ROWS)
+ {
+ int i;
+
+ row--;
+ for (i = 0; i < VGA_COLUMNS * row; i++)
+ {
+ video[2 * i] = video[2 * (i + VGA_COLUMNS)];
+ video[2 * i + 1] = video[2 * (i + VGA_COLUMNS) + 1];
+ }
+ for (i = VGA_COLUMNS * row; i < VGA_COLUMNS * VGA_ROWS; i++)
+ {
+ video[2 * i] = ' ';
+ video[2 * i + 1] = VGA_DEF_ATTRIBUTE;
+ }
+ }
+
+  /* Make the hardware cursor follow the software position.  */
+ pos = row * VGA_COLUMNS + col;
+ vga_set_cursor_pos (pos);
+}
+
+
+struct output_driver vga_output =
+ {
+ "vga",
+ vga_init,
+ 0, /* deinit */
+ vga_putchar
+ };
diff --git a/viengoos/output.c b/viengoos/output.c
new file mode 100644
index 0000000..b426a7d
--- /dev/null
+++ b/viengoos/output.c
@@ -0,0 +1,283 @@
+/* output.c - Output routines.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdarg.h>
+#include <string.h>
+
+#include "output.h"
+
+/* The active output driver. */
+static struct output_driver *output;
+
+
+/* Activate the output driver NAME or the default one if NAME is a
+ null pointer. Must be called once at startup, before calling
+ putchar or any other output routine. Returns 0 if NAME is not a
+ valid output driver name, otherwise 1 on success. */
+int
+output_init (const char *driver)
+{
+ const char *driver_cfg = NULL;
+
+  /* Shut down any previously active driver before switching.  */
+ if (output)
+ {
+ output_deinit ();
+ output = 0;
+ }
+
+ if (driver)
+ {
+ struct output_driver **out = &output_drivers[0];
+ while (*out)
+ {
+ unsigned int name_len = strlen ((*out)->name);
+  /* DRIVER has the form NAME[,CONFIG...]: match on the driver
+     name prefix, then require that it is followed by either the
+     end of the string or the ',' introducing the options.  */
+ if (!strncmp (driver, (*out)->name, name_len))
+ {
+ const char *cfg = driver + name_len;
+ if (!*cfg || *cfg == ',')
+ {
+ if (*cfg)
+ driver_cfg = cfg + 1;
+ output = *out;
+ break;
+ }
+ }
+ out++;
+ }
+ if (!output)
+ return 0;
+ }
+ else
+ output = output_drivers[0];
+
+ if (output->init)
+ (*output->init) (driver_cfg);
+
+ return 1;
+}
+
+
+/* Deactivate the output driver. Must be called after the last time
+   putchar or any other output routine is called, and before control
+   is passed on to the L4 kernel. */
+void
+output_deinit (void)
+{
+  if (output == 0)
+    return;
+
+  if (output->deinit)
+    output->deinit ();
+}
+
+
+/* Print the single character CHR on the output device.  Always
+   returns 0.  */
+int
+putchar (int chr)
+{
+  /* Lazily fall back to the default driver if none is active.  */
+  if (output == 0)
+    output_init (0);
+
+  if (output->putchar != 0)
+    output->putchar (chr);
+
+  return 0;
+}
+
+
+/* Print STR followed by a newline.  Always returns 0.  */
+int
+puts (const char *str)
+{
+  const char *p;
+
+  for (p = str; *p != '\0'; p++)
+    putchar (*p);
+
+  putchar ('\n');
+
+  return 0;
+}
+
+
+/* Print NR in base BASE (2 <= BASE <= 16) with no prefix and no
+   padding.  */
+static void
+print_nr (unsigned long long nr, int base)
+{
+  /* Const array rather than a mutable pointer to a string literal:
+     the table is read-only, and writing through such a pointer would
+     be undefined behavior.  */
+  static const char digits[] = "0123456789abcdef";
+  /* 30 characters suffice for 2^64 even in octal (22 digits).  */
+  char str[30];
+  int i = 0;
+
+  /* Generate the digits least-significant first...  */
+  do
+    {
+      str[i++] = digits[nr % base];
+      nr = nr / base;
+    }
+  while (nr);
+
+  /* ...then emit them in reverse order.  */
+  i--;
+  while (i >= 0)
+    putchar (str[i--]);
+}
+
+
+/* Print NR, which may be negative, in base BASE.  */
+static void
+print_signed_nr (long long nr, int base)
+{
+  unsigned long long unr;
+
+  if (nr < 0)
+    {
+      putchar ('-');
+      /* Convert to unsigned before negating: -NR is undefined
+	 behavior (signed overflow) when NR is LLONG_MIN, whereas
+	 unsigned negation is well defined and yields the correct
+	 magnitude for all values.  */
+      unr = -(unsigned long long) nr;
+    }
+  else
+    unr = nr;
+
+  print_nr (unr, base);
+}
+
+
+/* Minimal vprintf: supports %o %d %i %x %X %u %c %s %p and %%, plus
+   the "ll" length modifier for the integer conversions.  No field
+   widths, precisions or flags.  Unrecognized directives are echoed
+   literally.  Always returns 0, not the character count as ISO C
+   specifies.  */
+int
+vprintf (const char *fmt, va_list ap)
+{
+ const char *p = fmt;
+
+ while (*p != '\0')
+ {
+  /* Ordinary characters are copied through unchanged.  */
+ if (*p != '%')
+ {
+ putchar (*(p++));
+ continue;
+ }
+
+ p++;
+ switch (*p)
+ {
+ case '%':
+ putchar ('%');
+ p++;
+ break;
+
+  /* Only the doubled form "ll" is understood; a lone 'l' is
+     echoed back literally.  */
+ case 'l':
+ p++;
+ if (*p != 'l')
+ {
+ putchar ('%');
+ putchar ('l');
+ putchar (*(p++));
+ continue;
+ }
+ p++;
+ switch (*p)
+ {
+ case 'o':
+ print_nr (va_arg (ap, unsigned long long), 8);
+ p++;
+ break;
+
+ case 'd':
+ case 'i':
+ print_signed_nr (va_arg (ap, long long), 10);
+ p++;
+ break;
+
+ case 'x':
+ case 'X':
+ print_nr (va_arg (ap, unsigned long long), 16);
+ p++;
+ break;
+
+ case 'u':
+ print_nr (va_arg (ap, unsigned long long), 10);
+ p++;
+ break;
+
+ default:
+ putchar ('%');
+ putchar ('l');
+ putchar ('l');
+ putchar (*(p++));
+ break;
+ }
+ break;
+
+ case 'o':
+ print_nr (va_arg (ap, unsigned int), 8);
+ p++;
+ break;
+
+ case 'd':
+ case 'i':
+ print_signed_nr (va_arg (ap, int), 10);
+ p++;
+ break;
+
+ case 'x':
+ case 'X':
+ print_nr (va_arg (ap, unsigned int), 16);
+ p++;
+ break;
+
+ case 'u':
+ print_nr (va_arg (ap, unsigned int), 10);
+ p++;
+ break;
+
+ case 'c':
+ putchar (va_arg (ap, int));
+ p++;
+ break;
+
+  /* NOTE(review): a NULL argument to %s crashes here — callers are
+     presumably trusted kernel code; confirm.  */
+ case 's':
+ {
+ char *str = va_arg (ap, char *);
+ while (*str)
+ putchar (*(str++));
+ }
+ p++;
+ break;
+
+  /* NOTE(review): the pointer is truncated to unsigned int, which
+     is only correct on 32-bit targets (ia32 here) — revisit for
+     64-bit ports.  */
+ case 'p':
+ putchar ('0');
+ putchar ('x');
+ print_nr ((unsigned int) va_arg (ap, void *), 16);
+ p++;
+ break;
+
+ default:
+ putchar ('%');
+ putchar (*p);
+ p++;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Formatted output via the active output driver; see vprintf for the
+   supported directives.  Always returns 0.  */
+int
+printf (const char *fmt, ...)
+{
+  va_list ap;
+  int result;
+
+  va_start (ap, fmt);
+  result = vprintf (fmt, ap);
+  va_end (ap);
+
+  return result;
+}
diff --git a/viengoos/output.h b/viengoos/output.h
new file mode 100644
index 0000000..922ad28
--- /dev/null
+++ b/viengoos/output.h
@@ -0,0 +1,75 @@
+/* output.h - Output routines interfaces.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _OUTPUT_H
+#define _OUTPUT_H 1
+
+
+/* Every architecture must define at least one output driver, but might
+ define several. For each output driver, the name and operations on
+ the driver must be provided in the following structure. */
+
+struct output_driver
+{
+ const char *name;
+
+ /* Initialize the output device. */
+ void (*init) (const char *cfg);
+
+ /* Deinitialize the output device. */
+ void (*deinit) (void);
+
+ /* Output a character. */
+ void (*putchar) (int chr);
+};
+
+
+/* Every architecture must provide a list of all output drivers,
+ terminated by a driver structure which has a null pointer as its
+ name. */
+extern struct output_driver *output_drivers[];
+
+
+#include <stdarg.h>
+
+/* Activate the output driver DRIVER or the default one if DRIVER is a
+ null pointer. Must be called once at startup, before calling
+ putchar or any other output routine. DRIVER has the pattern
+ NAME[,CONFIG...], for example "serial,uart2,speed=9600". Returns 0
+ if DRIVER is not a valid output driver specification, otherwise 1
+ on success. */
+int output_init (const char *driver);
+
+
+/* Deactivate the output driver. Must be called after the last time
+ putchar or any other output routine is called. */
+void output_deinit (void);
+
+
+/* Print the single character CHR on the output device. */
+int putchar (int chr);
+
+int puts (const char *str);
+
+int vprintf (const char *fmt, va_list ap);
+
+int printf (const char *fmt, ...);
+
+#endif /* _OUTPUT_H */
diff --git a/viengoos/panic.c b/viengoos/panic.c
new file mode 100644
index 0000000..fc1da9c
--- /dev/null
+++ b/viengoos/panic.c
@@ -0,0 +1,42 @@
+/* panic.c - Panic implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <hurd/stddef.h>
+#include "output.h"
+#include "shutdown.h"
+#include "debug.h"
+
+/* Defined by the program; used to prefix diagnostics.  */
+extern const char program_name[];
+
+/* Report a fatal error (printf-style FMT) attributed to FUNC:LINE,
+   drop into the debugger, then halt the machine.  Invoked via the
+   panic macro; does not return.  */
+void
+panic_ (const char *func, int line, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start (ap, fmt);
+
+ printf ("%s:%s:%d error: ", program_name, func, line);
+ vprintf (fmt, ap);
+ putchar ('\n');
+ va_end (ap);
+ debugger ();
+ shutdown_machine ();
+}
+
diff --git a/viengoos/rm.h b/viengoos/rm.h
new file mode 100644
index 0000000..84c70d0
--- /dev/null
+++ b/viengoos/rm.h
@@ -0,0 +1,437 @@
+/* rm.h - Resource manager interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_RM_H
+#define RM_RM_H
+
+#include <assert.h>
+#include <l4.h>
+
+#include <hurd/types.h>
+#include <hurd/addr.h>
+#include <hurd/addr-trans.h>
+#include <hurd/startup.h>
+
+#include <errno.h>
+
+extern struct hurd_startup_data *__hurd_startup_data;
+
+enum rm_method_id
+ {
+ RM_putchar = 100,
+
+ RM_folio_alloc = 200,
+ RM_folio_free,
+ RM_folio_object_alloc,
+
+ RM_cap_copy = 300,
+ RM_cap_read,
+
+ RM_object_slot_copy_out = 400,
+ RM_object_slot_copy_in,
+ RM_object_slot_read,
+ };
+
+/* Return a human-readable name for method ID, for debugging and
+   tracing; "unknown method id" if ID is not a known method.  */
+static inline const char *
+rm_method_id_string (enum rm_method_id id)
+{
+  if (id == RM_putchar)
+    return "putchar";
+  if (id == RM_folio_alloc)
+    return "folio_alloc";
+  if (id == RM_folio_free)
+    return "folio_free";
+  if (id == RM_folio_object_alloc)
+    return "folio_object_alloc";
+  if (id == RM_cap_copy)
+    return "cap_copy";
+  if (id == RM_cap_read)
+    return "cap_read";
+  if (id == RM_object_slot_copy_out)
+    return "object_slot_copy_out";
+  if (id == RM_object_slot_copy_in)
+    return "object_slot_copy_in";
+  if (id == RM_object_slot_read)
+    return "object_slot_read";
+
+  return "unknown method id";
+}
+
+/* Echo the character CHR on the manager console. */
+static inline void
+__attribute__((always_inline))
+rm_putchar (int chr)
+{
+ l4_msg_tag_t tag;
+
+  /* Only accept untyped words in any (unexpected) reply.  */
+ l4_accept (L4_UNTYPED_WORDS_ACCEPTOR);
+
+  /* Build a one-word message labeled RM_putchar carrying CHR and
+     send it to the resource manager.  */
+ tag = l4_niltag;
+ l4_msg_tag_set_label (&tag, RM_putchar);
+ l4_msg_tag_set_untyped_words (&tag, 1);
+ l4_msg_tag_set_typed_words (&tag, 0);
+ l4_set_msg_tag (tag);
+ l4_load_mr (1, (l4_word_t) chr);
+ /* XXX: We should send data to the log server. */
+  /* Send only, no reply awaited; the result tag is stored but never
+     checked, so a failed send is silently ignored.  */
+ tag = l4_send (__hurd_startup_data->rm);
+}
+
+/* RPC template. ID is the method name, ARGS is the list of arguments
+ as normally passed to a function, LOADER is code to load the in
+ parameters, and STORER is code to load the out parameters. The
+ code assumes that the first MR contains the error code and returns
+ this as the function return value. If the IPC fails, EHOSTDOWN is
+ returned. */
+#define RPCX(id, args, loader, storer) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ rm_##id args \
+ { \
+ l4_msg_tag_t tag; \
+ l4_msg_t msg; \
+ \
+ l4_accept (L4_UNTYPED_WORDS_ACCEPTOR); \
+ \
+ l4_msg_clear (msg); \
+ tag = l4_niltag; \
+ l4_msg_tag_set_label (&tag, RM_##id); \
+ l4_msg_set_msg_tag (msg, tag); \
+ loader; \
+ l4_msg_load (msg); \
+ tag = l4_call (__hurd_startup_data->rm); \
+ \
+ if (l4_ipc_failed (tag)) \
+ return EHOSTDOWN; \
+ \
+ l4_word_t err; \
+ l4_store_mr (1, &err); \
+ \
+ int idx __attribute__ ((unused)); \
+ idx = 2; \
+ storer; \
+ \
+ return err; \
+ }
+
+/* Load the argument ARG, which is of type TYPE into MR IDX. */
+#define RPCLOAD(type, arg) \
+ { \
+ assert ((sizeof (arg) & (sizeof (l4_word_t) - 1)) == 0); \
+ union \
+ { \
+ type arg_value_; \
+ l4_word_t raw[sizeof (type) / sizeof (l4_word_t)]; \
+ } arg_union_ = { (arg) }; \
+ for (int i_ = 0; i_ < sizeof (type) / sizeof (l4_word_t); i_ ++) \
+ l4_msg_append_word (msg, arg_union_.raw[i_]); \
+ }
+
+/* Store the contents of MR IDX+1 into *ARG, which is of type TYPE.
+ NB: IDX is thus the return parameter number, not the message
+ register number; MR0 contains the error code. */
+#define RPCSTORE(type, arg) \
+ { \
+ assert ((sizeof (*arg) & (sizeof (l4_word_t) - 1)) == 0); \
+ union \
+ { \
+ type a__; \
+ l4_word_t *raw; \
+ } arg_union_ = { (arg) }; \
+ for (int i_ = 0; i_ < sizeof (*arg) / sizeof (l4_word_t); i_ ++) \
+ l4_store_mr (idx ++, &arg_union_.raw[i_]); \
+ }
+
+/* RPC with 2 in parameters and no out parameters. */
+#define RPC2(id, type1, arg1, type2, arg2) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ }, \
+ {})
+
+/* RPC with 3 in parameters and no out parameters. */
+#define RPC3(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ }, \
+ {})
+
+/* RPC with 4 in parameters and no out parameters. */
+#define RPC4(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ }, \
+ {})
+
+/* RPC with 5 in parameters and no out parameters. */
+#define RPC5(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ type5, arg5) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ RPCLOAD(type5, arg5) \
+ }, \
+ {})
+
+/* RPC with 6 in parameters and no out parameters. */
+#define RPC6(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ type5, arg5, \
+ type6, arg6) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, \
+ type6 arg6), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ RPCLOAD(type5, arg5) \
+ RPCLOAD(type6, arg6) \
+ }, \
+ {})
+
+/* RPC with 7 in parameters and no out parameters. */
+#define RPC7(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ type5, arg5, \
+ type6, arg6, \
+ type7, arg7) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, \
+ type6 arg6, type7 arg7), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ RPCLOAD(type5, arg5) \
+ RPCLOAD(type6, arg6) \
+ RPCLOAD(type7, arg7) \
+ }, \
+ {})
+
+/* RPC with 8 in parameters and no out parameters. */
+#define RPC8(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ type5, arg5, \
+ type6, arg6, \
+ type7, arg7, \
+ type8, arg8) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, \
+ type6 arg6, type7 arg7, type8 arg8), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ RPCLOAD(type5, arg5) \
+ RPCLOAD(type6, arg6) \
+ RPCLOAD(type7, arg7) \
+ RPCLOAD(type8, arg8) \
+ }, \
+ {})
+
+/* RPC with 9 in parameters and no out parameters. */
+#define RPC9(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ type5, arg5, \
+ type6, arg6, \
+ type7, arg7, \
+ type8, arg8, \
+ type9, arg9) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, \
+ type6 arg6, type7 arg7, type8 arg8, type9 arg9), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ RPCLOAD(type5, arg5) \
+ RPCLOAD(type6, arg6) \
+ RPCLOAD(type7, arg7) \
+ RPCLOAD(type8, arg8) \
+ RPCLOAD(type9, arg9) \
+ }, \
+ {})
+
+/* RPC with 2 in parameters and 2 out parameters. */
+#define RPC22(id, type1, arg1, \
+ type2, arg2, \
+ otype1, oarg1, \
+ otype2, oarg2) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, otype1 oarg1, otype2 oarg2), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ }, \
+ { \
+ RPCSTORE(otype1, oarg1) \
+ RPCSTORE(otype2, oarg2) \
+ })
+
+/* RPC with 3 in parameters and 2 out parameters.  The message's
+   untyped-word count is maintained by l4_msg_append_word (via
+   RPCLOAD), so no explicit count is set here — matching the other
+   RPCn macros.  (The former l4_msg_tag_set_untyped_words call ran
+   after the tag had already been copied into MSG by
+   l4_msg_set_msg_tag and therefore had no effect.)  */
+#define RPC32(id, type1, arg1, \
+              type2, arg2, \
+              type3, arg3, \
+              otype1, oarg1, \
+              otype2, oarg2) \
+  RPCX(id, \
+       (type1 arg1, type2 arg2, type3 arg3, otype1 oarg1, otype2 oarg2), \
+       {RPCLOAD(type1, arg1) \
+        RPCLOAD(type2, arg2) \
+        RPCLOAD(type3, arg3) \
+       }, \
+       { \
+        RPCSTORE(otype1, oarg1) \
+        RPCSTORE(otype2, oarg2) \
+       })
+
+/* RPC with 4 in parameters and 2 out parameters. */
+#define RPC42(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ otype1, oarg1, \
+ otype2, oarg2) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
+ otype1 oarg1, otype2 oarg2), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ }, \
+ { \
+ RPCSTORE(otype1, oarg1) \
+ RPCSTORE(otype2, oarg2) \
+ })
+
+/* RPC with 5 in parameters and 2 out parameters. */
+#define RPC52(id, type1, arg1, \
+ type2, arg2, \
+ type3, arg3, \
+ type4, arg4, \
+ type5, arg5, \
+ otype1, oarg1, \
+ otype2, oarg2) \
+ RPCX(id, \
+ (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, \
+ otype1 oarg1, otype2 oarg2), \
+ {RPCLOAD(type1, arg1) \
+ RPCLOAD(type2, arg2) \
+ RPCLOAD(type3, arg3) \
+ RPCLOAD(type4, arg4) \
+ RPCLOAD(type5, arg5) \
+ }, \
+ { \
+ RPCSTORE(otype1, oarg1) \
+ RPCSTORE(otype2, oarg2) \
+ })
+
+/* Allocate a folio against PRINCIPAL. Store a capability in
+ the caller's cspace in slot FOLIO. */
+RPC2(folio_alloc, addr_t, principal, addr_t, folio)
+
+/* Free the folio designated by FOLIO. PRINCIPAL pays. */
+RPC2(folio_free, addr_t, principal, addr_t, folio)
+
+/* Allocate INDEXth object in folio FOLIO as an object of type TYPE.
+ PRINCIPAL is charged. If OBJECT_SLOT is not ADDR_VOID, then stores
+ a capability to the allocated object in OBJECT_SLOT. */
+RPC5(folio_object_alloc, addr_t, principal,
+ addr_t, folio, l4_word_t, index, l4_word_t, type, addr_t, object_slot)
+
+enum
+{
+ /* Use subpage in CAP_ADDR_TRANS (must be a subset of subpage in
+ SOURCE). */
+ CAP_COPY_COPY_SUBPAGE = 1 << 0,
+ /* Use guard in TARGET, not the guard in CAP_ADDR_TRANS. */
+ CAP_COPY_COPY_GUARD = 1 << 1,
+};
+
+#define THREAD_ASPACE_SLOT 0
+#define THREAD_ACTIVITY_SLOT 1
+
+/* Copy capability SOURCE to the capability slot TARGET.
+ ADDR_TRANS_FLAGS is a subset of CAP_COPY_GUARD, CAP_COPY_SUBPAGE,
+ and CAP_COPY_PRESERVE_GUARD, bitwise-ored. If CAP_COPY_GUARD is
+ set, the guard descriptor in CAP_ADDR_TRANS is used, if
+ CAP_COPY_PRESERVE_GUARD, the guard descriptor in TARGET, otherwise,
+ the guard descriptor is copied from SOURCE. If CAP_COPY_SUBPAGE is
+ set, the subpage descriptor in CAP_ADDR_TRANS is used, otherwise,
+ the subpage descriptor is copied from SOURCE. */
+RPC5(cap_copy, addr_t, principal, addr_t, target, addr_t, source,
+ l4_word_t, addr_trans_flags, struct cap_addr_trans, cap_addr_trans)
+
+/* Store the public bits of the capability CAP in *TYPE and
+ *CAP_ADDR_TRANS. */
+RPC22(cap_read, addr_t, principal, addr_t, cap,
+ l4_word_t *, type, struct cap_addr_trans *, cap_addr_trans)
+
+/* Copy the capability from slot SLOT of the object OBJECT (relative
+ to the start of the object's subpage) to slot TARGET. */
+RPC6(object_slot_copy_out, addr_t, principal,
+ addr_t, object, l4_word_t, slot, addr_t, target,
+ l4_word_t, flags, struct cap_addr_trans, cap_addr_trans)
+
+/* Copy the capability from slot SOURCE to slot INDEX of the object
+ OBJECT (relative to the start of the object's subpage). */
+RPC6(object_slot_copy_in, addr_t, principal,
+ addr_t, object, l4_word_t, index, addr_t, source,
+ l4_word_t, flags, struct cap_addr_trans, cap_addr_trans)
+
+/* Store the public bits of the capability slot SLOT of object
+ OBJECT in *TYPE and *CAP_ADDR. */
+RPC32(object_slot_read, addr_t, principal,
+ addr_t, object, l4_word_t, slot,
+ l4_word_t *, type, struct cap_addr_trans *, cap_addr_trans)
+
+#endif
diff --git a/viengoos/server.c b/viengoos/server.c
new file mode 100644
index 0000000..7b747e0
--- /dev/null
+++ b/viengoos/server.c
@@ -0,0 +1,515 @@
+/* server.c - Server loop implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <l4.h>
+#include <l4/pagefault.h>
+#include <hurd/cap.h>
+#include <hurd/stddef.h>
+
+#include "server.h"
+
+#include "rm.h"
+
+#include "cap.h"
+#include "object.h"
+#include "thread.h"
+#include "activity.h"
+#include "viengoos.h"
+
+#define DEBUG(level, format, args...) \
+ debug (level, "(%s) " format, \
+ l4_is_pagefault (msg_tag) ? "pagefault" \
+ : rm_method_id_string (label), \
+ ##args)
+
+void
+server_loop (void)
+{
+ int do_reply = 0;
+ l4_thread_id_t to = l4_nilthread;
+
+ for (;;)
+ {
+ l4_thread_id_t from = l4_anythread;
+ l4_msg_tag_t msg_tag;
+
+ /* Only accept untyped items--no strings, no mappings. */
+ l4_accept (L4_UNTYPED_WORDS_ACCEPTOR);
+ if (do_reply)
+ msg_tag = l4_reply_wait (to, &from);
+ else
+ msg_tag = l4_wait (&from);
+
+ if (l4_ipc_failed (msg_tag))
+ panic ("Receiving message failed: %u", (l4_error_code () >> 1) & 0x7);
+
+ l4_msg_t msg;
+ l4_msg_store (msg_tag, msg);
+ l4_word_t label;
+ label = l4_label (msg_tag);
+
+ /* By default we reply to the sender. */
+ to = from;
+ /* Unless explicitly overridden, don't reply. */
+ do_reply = 0;
+
+ /* Find the sender. */
+ struct thread *thread = thread_lookup (from);
+ assert (thread);
+
+ /* XXX: We can't charge THREAD's activity until we have the
+ activity object, however, getting the activity object may
+ require not only a few cycles but also storage and disk
+ activity. What we could do is have a special type of
+ activity called a charge that acts as the resource principal
+ and then once we find the real principal, we just add the
+ charges to the former to the latter. */
+ struct activity *activity
+ = (struct activity *) cap_to_object (root_activity,
+ &thread->activity);
+ if (! activity)
+ {
+ DEBUG (1, "Caller has no assigned activity");
+ continue;
+ }
+
+ if (l4_is_pagefault (msg_tag))
+ /* The label is not constant: it includes the type of fault.
+ Thus, it is difficult to incorporate it into the case
+ switch below. */
+ {
+ l4_word_t access;
+ l4_word_t ip;
+ l4_word_t fault = l4_pagefault (msg_tag, &access, &ip);
+ bool w = !! (access & L4_FPAGE_WRITABLE);
+
+ DEBUG (5, "Page fault at %x (ip = %x)", fault, ip);
+ l4_word_t page_addr = fault & ~(PAGESIZE - 1);
+
+ bool writable;
+ struct cap cap;
+ struct object *page = NULL;
+
+ cap = object_lookup_rel (activity, &thread->aspace,
+ ADDR (page_addr, ADDR_BITS - PAGESIZE_LOG2),
+ w ? cap_page : cap_rpage,
+ &writable);
+ if (cap.type != cap_void)
+ page = cap_to_object (activity, &cap);
+
+ if (! page)
+ {
+ do_debug (4)
+ as_dump_from (activity, &thread->aspace, NULL);
+ DEBUG (1, "Send %x.%x a SIGSEGV (ip: %x; fault: %x.%c)!",
+ l4_thread_no (from), l4_version (from), ip,
+ fault, w ? 'w' : 'r');
+ continue;
+ }
+
+ /* Only allow permitted rights through. */
+ if (w && ! writable)
+ {
+ DEBUG (1, "Sending SIGSEGV! Bad access.");
+ continue;
+ }
+
+ // DEBUG ("Replying with addr %x", (uintptr_t) page);
+ l4_map_item_t map_item
+ = l4_map_item (l4_fpage_add_rights (l4_fpage ((uintptr_t) page,
+ PAGESIZE),
+ access),
+ page_addr);
+
+ /* Formulate the reply message. */
+ l4_pagefault_reply_formulate (&map_item);
+
+ do_reply = 1;
+ continue;
+ }
+
+ struct activity *principal;
+
+ /* Check that the received message contains WORDS untyped words (not
+ including the principal!). */
+#define CHECK(words, words64) \
+ ({ \
+ expected_words = (words) + ((words64) + 1) * ARG64_WORDS; \
+ if (l4_untyped_words (msg_tag) != expected_words \
+ || l4_typed_words (msg_tag) != 0) \
+ { \
+ DEBUG (1, "Invalid format for %s: expected %d words, got %d", \
+ rm_method_id_string (label), \
+ expected_words, l4_untyped_words (msg_tag)); \
+ REPLY (EINVAL); \
+ } \
+ })
+
+ /* Reply with the error code ERR_ and set the number of untyped
+ words *in addition* to the return code to return to WORDS_. */
+#define REPLYW(err_, words_) \
+ do \
+ { \
+ if (! (err_)) \
+ /* No error: we should have read all the arguments. */ \
+ assert (args_read == expected_words); \
+ l4_msg_put_word (msg, 0, (err_)); \
+ l4_msg_set_untyped_words (msg, 1 + (words_)); \
+ do_reply = 1; \
+ goto out; \
+ } \
+ while (0)
+
+#define REPLY(err_) REPLYW((err_), 0)
+
+ /* Return the next word. */
+#define ARG() \
+ ({ \
+ assert (args_read < expected_words); \
+ l4_msg_word (msg, args_read ++); \
+ })
+
+ /* Return the next 64-bit argument (one or two message words,
+ depending on L4_WORDSIZE). */
+#if L4_WORDSIZE == 32
+#define ARG64() \
+ ({ \
+ union { l4_uint64_t raw; struct { l4_uint32_t word[2]; }; } value_; \
+ value_.word[0] = ARG (); \
+ value_.word[1] = ARG (); \
+ value_.raw; \
+ })
+#define ARG64_WORDS 2
+#else
+#define ARG64(word_) ARG(word_)
+#define ARG64_WORDS 1
+#endif
+
+#define ARG_ADDR() ((addr_t) { ARG64() })
+
+ /* Return the capability slot corresponding to address ADDR. */
+ error_t SLOT_ (addr_t addr, struct cap **capp)
+ {
+ bool writable;
+ *capp = slot_lookup_rel (activity, &thread->aspace,
+ addr, -1, &writable);
+ if (! *capp)
+ {
+ DEBUG (1, "No capability slot at 0x%llx/%d",
+ addr_prefix (addr), addr_depth (addr));
+ return ENOENT;
+ }
+ if (! writable)
+ {
+ DEBUG (1, "Capability slot at 0x%llx/%d not writable",
+ addr_prefix (addr), addr_depth (addr));
+ return EPERM;
+ }
+
+ return 0;
+ }
+#define SLOT(addr_) \
+ ({ struct cap *SLOT_ret; \
+ error_t err = SLOT_ (addr_, &SLOT_ret); \
+ if (err) \
+ REPLY (err); \
+ SLOT_ret; \
+ })
+ /* Return a cap referencing the object at address ADDR of the
+ callers capability space if it is of type TYPE (-1 = don't care).
+ Whether the object is writable is stored in *WRITABLEP_. */
+ error_t CAP_ (addr_t addr, int type, bool *writablep, struct cap *cap)
+ {
+ *cap = cap_lookup_rel (principal, &thread->aspace, addr,
+ type, writablep);
+ if (type != -1 && cap->type != type)
+ {
+ DEBUG (1, "Addr 0x%llx/%d does not reference object of type %s",
+ addr_prefix (addr), addr_depth (addr),
+ cap_type_string (type));
+ as_dump_from (activity, &thread->aspace, "");
+ return ENOENT;
+ }
+ return 0;
+ }
+#define CAP(addr_, type_, writeablep_) \
+ ({ struct cap CAP_ret; \
+ error_t err = CAP_ (addr_, type_, writeablep_, &CAP_ret); \
+ if (err) \
+ REPLY (err); \
+ CAP_ret; \
+ })
+
+ error_t OBJECT_ (addr_t addr, int type, bool *writablep,
+ struct object **objectp)
+ {
+ struct cap cap;
+ error_t err = CAP_ (addr, type, writablep, &cap);
+ if (err)
+ return err;
+
+ *objectp = cap_to_object (principal, &cap);
+ if (! *objectp)
+ {
+ DEBUG (1, "Addr 0x%llx/%d, dangling pointer",
+ addr_prefix (addr), addr_depth (addr));
+ return ENOENT;
+ }
+
+ return 0;
+ }
+#define OBJECT(addr_, type_, writeablep_) \
+ ({ struct object *OBJECT_ret; \
+ error_t err = OBJECT_ (addr_, type_, writeablep_, &OBJECT_ret); \
+ if (err) \
+ REPLY (err); \
+ OBJECT_ret; \
+ })
+
+ int args_read = 0;
+ /* We set this to ARG64_WORDS (for the principal); CHECK will set
+ it appropriately. */
+ int expected_words = ARG64_WORDS;
+
+ l4_msg_clear (msg);
+ if (label == RM_putchar)
+ {
+ /* We don't expect a principal. */
+ CHECK (1, -1);
+
+ int chr = l4_msg_word (msg, 0);
+ putchar (chr);
+
+ /* No reply needed. */
+ continue;
+ }
+
+ principal = activity;
+ addr_t principal_addr = ARG_ADDR ();
+ if (! ADDR_IS_VOID (principal_addr))
+ principal = (struct activity *) OBJECT (principal_addr,
+ cap_activity, NULL);
+
+ struct folio *folio;
+ struct object *object;
+ l4_word_t idx;
+ l4_word_t type;
+ bool r;
+ struct cap source;
+ addr_t source_addr;
+ struct cap *target;
+ addr_t target_addr;
+
+ DEBUG (5, "");
+
+ switch (label)
+ {
+ case RM_folio_alloc:;
+ CHECK (0, 1);
+ struct cap *folio_slot = SLOT (ARG_ADDR ());
+
+ folio = folio_alloc (principal);
+ if (! folio)
+ REPLY (ENOMEM);
+
+ r = cap_set (folio_slot, object_to_cap ((struct object *) folio));
+ assert (r);
+ REPLY (0);
+
+ case RM_folio_free:;
+ CHECK (0, 1);
+
+ folio = (struct folio *) OBJECT (ARG_ADDR (), cap_folio, NULL);
+ folio_free (principal, folio);
+
+ REPLY (0);
+
+ case RM_folio_object_alloc:;
+ CHECK (2, 2);
+
+ addr_t folio_addr = ARG_ADDR ();
+ folio = (struct folio *) OBJECT (folio_addr, cap_folio, NULL);
+
+ idx = ARG ();
+ if (idx >= FOLIO_OBJECTS)
+ REPLY (EINVAL);
+
+ type = ARG ();
+ if (! (CAP_TYPE_MIN <= type && type <= CAP_TYPE_MAX))
+ REPLY (EINVAL);
+
+ addr_t object_addr = ARG_ADDR ();
+ struct cap *object_slot = NULL;
+ if (! ADDR_IS_VOID (object_addr))
+ object_slot = SLOT (object_addr);
+
+ DEBUG (4, "(folio: %llx/%d, idx: %d, type: %s, target: %llx/%d)",
+ addr_prefix (folio_addr), addr_depth (folio_addr),
+ idx, cap_type_string (type),
+ addr_prefix (object_addr), addr_depth (object_addr));
+
+ folio_object_alloc (principal, folio, idx, type,
+ type == cap_void ? NULL : &object);
+
+ if (type != cap_void && object_slot)
+ {
+ r = cap_set (object_slot, object_to_cap (object));
+ assert (r);
+ }
+
+ REPLY (0);
+
+ case RM_object_slot_copy_out:;
+ case RM_object_slot_copy_in:;
+ CHECK (3, 2);
+
+ addr_t addr = ARG_ADDR ();
+ source = CAP (addr, -1, NULL);
+ idx = ARG ();
+
+ if (idx >= cap_type_num_slots[source.type])
+ REPLY (EINVAL);
+
+ if (source.type == cap_cappage || source.type == cap_rcappage)
+ /* Ensure that idx falls within the subpage. */
+ {
+ if (idx >= CAP_SUBPAGE_SIZE (&source))
+ {
+ DEBUG (1, "index (%d) >= subpage size (%d)",
+ idx, CAP_SUBPAGE_SIZE (&source));
+ REPLY (EINVAL);
+ }
+
+ idx += CAP_SUBPAGE_OFFSET (&source);
+ }
+
+ object = cap_to_object (principal, &source);
+ if (! object)
+ {
+ DEBUG (1, CAP_FMT " maps to void", CAP_PRINTF (&source));
+ REPLY (EINVAL);
+ }
+
+ if (label == RM_object_slot_copy_out)
+ {
+ source_addr = addr;
+
+ source = ((struct cap *) object)[idx];
+ target_addr = ARG_ADDR ();
+ target = SLOT (target_addr);
+ }
+ else
+ {
+ target_addr = addr;
+
+ source_addr = ARG_ADDR ();
+ source = CAP (source_addr, -1, NULL);
+ target = &((struct cap *) object)[idx];
+ }
+
+ goto cap_copy_body;
+
+ case RM_cap_copy:;
+ CHECK (2, 2);
+
+ target_addr = ARG_ADDR ();
+ target = SLOT (target_addr);
+
+ source_addr = ARG_ADDR ();
+ source = CAP (source_addr, -1, NULL);
+
+ cap_copy_body:;
+
+ l4_word_t flags = ARG ();
+ if ((flags & ~(CAP_COPY_COPY_SUBPAGE | CAP_COPY_COPY_GUARD)))
+ REPLY (EINVAL);
+
+ struct cap_addr_trans addr_trans;
+ addr_trans.raw = ARG ();
+
+ DEBUG (4, "(%llx/%d, %llx/%d, %s|%s, {%llx/%d %d/%d})",
+ addr_prefix (target_addr), addr_depth (target_addr),
+ addr_prefix (source_addr), addr_depth (source_addr),
+ flags & CAP_COPY_COPY_GUARD ? "copy" : "preserve",
+ flags & CAP_COPY_COPY_SUBPAGE ? "copy" : "preserve",
+ CAP_ADDR_TRANS_GUARD (addr_trans),
+ CAP_ADDR_TRANS_GUARD_BITS (addr_trans),
+ CAP_ADDR_TRANS_SUBPAGE (addr_trans),
+ CAP_ADDR_TRANS_SUBPAGES (addr_trans));
+
+ bool r = cap_copy_x (principal,
+ target, ADDR_VOID,
+ source, ADDR_VOID,
+ flags, addr_trans);
+ if (r)
+ REPLY (0);
+ else
+ REPLY (EINVAL);
+
+ case RM_object_slot_read:
+ CHECK (1, 1);
+
+ /* We don't look up the argument directly as we need to
+ respect any subpage specification for cappages. */
+ source = CAP (ARG_ADDR (), -1, NULL);
+ l4_word_t idx = ARG ();
+
+ object = cap_to_object (activity, &source);
+ if (! object)
+ REPLY (EINVAL);
+
+ if (idx >= cap_type_num_slots[source.type])
+ REPLY (EINVAL);
+
+ if (source.type == cap_cappage || source.type == cap_rcappage)
+ /* Ensure that idx falls within the subpage. */
+ {
+ if (idx >= CAP_SUBPAGE_SIZE (&source))
+ REPLY (EINVAL);
+
+ idx += CAP_SUBPAGE_OFFSET (&source);
+ }
+
+ source = ((struct cap *) object)[idx];
+
+ goto cap_read_body;
+
+ case RM_cap_read:;
+ CHECK (0, 1);
+
+ source = CAP (ARG_ADDR (), -1, NULL);
+
+ cap_read_body:
+
+ l4_msg_put_word (msg, 1, source.type);
+ l4_msg_put_word (msg, 2, *(l4_word_t *) &source.addr_trans);
+
+ REPLYW (0, 2);
+
+ default:
+ /* XXX: Don't panic when running production code. */
+ DEBUG (1, "Didn't handle message from %x.%x with label %d",
+ l4_thread_no (from), l4_version (from), label);
+ }
+
+ out:
+ if (do_reply)
+ l4_msg_load (msg);
+ }
+}
diff --git a/viengoos/server.h b/viengoos/server.h
new file mode 100644
index 0000000..e64ff14
--- /dev/null
+++ b/viengoos/server.h
@@ -0,0 +1,27 @@
+/* server.h - Server loop interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_SERVER_H
+#define RM_SERVER_H
+
+/* Begin serving requests. Never returns. */
+extern void server_loop (void) __attribute__ ((noreturn));
+
+#endif
diff --git a/viengoos/shutdown.c b/viengoos/shutdown.c
new file mode 100644
index 0000000..d61fd6e
--- /dev/null
+++ b/viengoos/shutdown.c
@@ -0,0 +1,83 @@
+/* shutdown.c - System shutdown functions.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <l4.h>
+
+#ifdef _L4_TEST_ENVIRONMENT
+extern void abort (void);
+#endif
+
+#include "shutdown.h"
+
+
+/* Reset the machine at failure, instead of halting it. */
+int shutdown_reset;
+
+/* Time to sleep before reset. */
+#define SLEEP_TIME 10
+
+
+#ifdef _L4_TEST_ENVIRONMENT
+void
+reset (void)
+{
+ abort ();
+}
+#endif
+
+void
+halt (void)
+{
+#ifdef _L4_TEST_ENVIRONMENT
+ abort ();
+#else
+ l4_sleep (L4_NEVER);
+#endif
+}
+
+
+void
+shutdown_machine (void)
+{
+ if (shutdown_reset)
+ {
+ l4_time_t timespec = l4_time_period (SLEEP_TIME * 1000UL * 1000UL);
+
+ l4_sleep (timespec);
+ reset ();
+ }
+ else
+ halt ();
+
+ /* Never reached. */
+ if (shutdown_reset)
+ {
+ printf ("Unable to reset this machine.\n");
+ halt ();
+ }
+
+ printf ("Unable to halt this machine.\n");
+ while (1)
+ ;
+}
diff --git a/viengoos/shutdown.h b/viengoos/shutdown.h
new file mode 100644
index 0000000..a162311
--- /dev/null
+++ b/viengoos/shutdown.h
@@ -0,0 +1,42 @@
+/* shutdown.h - System shutdown functions interfaces.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _SHUTDOWN_H
+#define _SHUTDOWN_H 1
+
+/* Every architecture must provide the following functions. */
+
+/* Reset the machine. */
+void reset (void);
+
+/* Halt the machine. */
+void halt (void);
+
+
+/* The generic code defines these functions. */
+
+/* Reset the machine at failure, instead of halting it. */
+extern int shutdown_reset;
+
+/* End the program with a failure. This can halt or reset the
+ system. */
+extern void __attribute__ ((__noreturn__)) shutdown_machine (void);
+
+#endif /* _SHUTDOWN_H */
diff --git a/viengoos/sigma0.c b/viengoos/sigma0.c
new file mode 100644
index 0000000..2a1973d
--- /dev/null
+++ b/viengoos/sigma0.c
@@ -0,0 +1,148 @@
+/* Client code for sigma0.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <l4.h>
+
+#include <hurd/stddef.h>
+
+/* The thread ID of sigma0. */
+#define SIGMA0_TID (l4_global_id (l4_thread_user_base (), 1))
+
+/* The message label for the sigma0 request page operation. This is
+ -6 in the upper 24 bits. */
+#define SIGMA0_RPC (0xffa0)
+
+/* The message label for undocumented sigma0 operations. This is
+ -1001 in the upper 24 bits. */
+#define SIGMA0_EXT (0xc170)
+
+/* For undocumented operations, this is the meaning of the first
+ untyped word in the message (MR1). */
+#define SIGMA0_EXT_SET_VERBOSITY 1
+#define SIGMA0_EXT_DUMP_MEMORY 2
+
+/* Set the verbosity level in sigma0. The only levels used currently
+ are 1 to 3. Panics if the IPC to sigma0 fails. */
+void
+sigma0_set_verbosity (l4_word_t level)
+{
+ l4_msg_t msg;
+ l4_msg_tag_t tag;
+
+ l4_msg_clear (msg);
+ l4_set_msg_label (msg, SIGMA0_EXT);
+ l4_msg_append_word (msg, SIGMA0_EXT_SET_VERBOSITY);
+ l4_msg_append_word (msg, level);
+ l4_msg_load (msg);
+ tag = l4_send (SIGMA0_TID);
+ if (l4_ipc_failed (tag))
+ panic ("%s: request failed during %s: %u", __func__,
+ l4_error_code () & 1 ? "receive" : "send",
+ (l4_error_code () >> 1) & 0x7);
+}
+
+
+/* Request a memory dump from sigma0. If WAIT is true, wait until the
+ dump is completed before continuing. */
+void
+sigma0_dump_memory (int wait)
+{
+ l4_msg_t msg;
+ l4_msg_tag_t tag;
+
+ l4_msg_clear (msg);
+ l4_set_msg_label (msg, SIGMA0_EXT);
+ l4_msg_append_word (msg, SIGMA0_EXT_DUMP_MEMORY);
+ l4_msg_append_word (msg, wait);
+ l4_msg_load (msg);
+ if (wait)
+ tag = l4_call (SIGMA0_TID);
+ else
+ tag = l4_send (SIGMA0_TID);
+ if (l4_ipc_failed (tag))
+ panic ("%s: request failed during %s: %u", __func__,
+ l4_error_code () & 1 ? "receive" : "send",
+ (l4_error_code () >> 1) & 0x7);
+}
+
+
+/* Request the fpage FPAGE from sigma0. */
+void
+sigma0_get_fpage (l4_fpage_t fpage)
+{
+ l4_msg_t msg;
+ l4_msg_tag_t tag;
+ l4_map_item_t map_item;
+
+ l4_accept (l4_map_grant_items (L4_COMPLETE_ADDRESS_SPACE));
+ l4_msg_clear (msg);
+ l4_set_msg_label (msg, SIGMA0_RPC);
+ l4_msg_append_word (msg, fpage);
+ l4_msg_append_word (msg, L4_DEFAULT_MEMORY);
+ l4_msg_load (msg);
+ tag = l4_call (SIGMA0_TID);
+ if (l4_ipc_failed (tag))
+ panic ("%s: request failed during %s: %u", __func__,
+ l4_error_code () & 1 ? "receive" : "send",
+ (l4_error_code () >> 1) & 0x7);
+ if (l4_untyped_words (tag) != 0 || l4_typed_words (tag) != 2)
+ panic ("%s: invalid format of sigma0 reply", __func__);
+ l4_msg_store (tag, msg);
+ l4_msg_get_map_item (msg, 0, &map_item);
+ if (l4_is_nil_fpage (l4_map_item_snd_fpage (map_item)))
+ panic ("%s: sigma0 rejected mapping", __func__);
+ if (l4_address (fpage) != l4_address (l4_map_item_snd_fpage (map_item)))
+ panic ("%s: sigma0 returned wrong address 0x%x (expected 0x%x)",
+ __func__, l4_address (l4_map_item_snd_fpage (map_item)),
+ l4_address (fpage));
+}
+
+
+/* Request an fpage of the size 2^SIZE from sigma0. The fpage will be
+ fully accessible. */
+l4_fpage_t
+sigma0_get_any (unsigned int size)
+{
+ l4_msg_t msg;
+ l4_msg_tag_t tag;
+ l4_map_item_t map_item;
+ l4_fpage_t fpage = l4_fpage_log2 (-1, size);
+
+ l4_accept (l4_map_grant_items (L4_COMPLETE_ADDRESS_SPACE));
+ l4_msg_clear (msg);
+ l4_set_msg_label (msg, SIGMA0_RPC);
+ l4_msg_append_word (msg, fpage);
+ l4_msg_append_word (msg, L4_DEFAULT_MEMORY);
+ l4_msg_load (msg);
+ tag = l4_call (SIGMA0_TID);
+ if (l4_ipc_failed (tag))
+ panic ("%s: request failed during %s: %u", __func__,
+ l4_error_code () & 1 ? "receive" : "send",
+ (l4_error_code () >> 1) & 0x7);
+ if (l4_untyped_words (tag) != 0
+ || l4_typed_words (tag) != 2)
+ panic ("%s: invalid format of sigma0 reply", __func__);
+ l4_msg_store (tag, msg);
+ l4_msg_get_map_item (msg, 0, &map_item);
+ return l4_map_item_snd_fpage (map_item);
+}
diff --git a/viengoos/sigma0.h b/viengoos/sigma0.h
new file mode 100644
index 0000000..960b85f
--- /dev/null
+++ b/viengoos/sigma0.h
@@ -0,0 +1,42 @@
+/* Client code for sigma0.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <l4.h>
+
+/* Set the verbosity level in sigma0. The only levels used currently
+ are 1 to 3. Panics if the IPC to sigma0 fails. */
+void sigma0_set_verbosity (l4_word_t level);
+
+
+/* Request a memory dump from sigma0. If WAIT is true, wait until the
+ dump is completed before continuing. */
+void sigma0_dump_memory (int wait);
+
+
+/* Request the fpage FPAGE from sigma0. */
+void sigma0_get_fpage (l4_fpage_t fpage);
+
+
+/* Request an fpage of the size 2^SIZE from sigma0. The fpage will be
+ fully accessible. */
+l4_fpage_t sigma0_get_any (unsigned int size_log2);
diff --git a/viengoos/t-as.c b/viengoos/t-as.c
new file mode 100644
index 0000000..281edeb
--- /dev/null
+++ b/viengoos/t-as.c
@@ -0,0 +1,322 @@
+#define _L4_TEST_MAIN
+#include "t-environment.h"
+
+#include <hurd/types.h>
+#include <hurd/stddef.h>
+
+#include "memory.h"
+#include "cap.h"
+#include "object.h"
+#include "activity.h"
+#include "as.h"
+
+static struct activity *root_activity;
+
+/* Current working folio. */
+static struct folio *folio;
+static int object;
+
+static struct as_insert_rt
+allocate_object (enum cap_type type, addr_t addr)
+{
+ if (! folio || object == FOLIO_OBJECTS)
+ {
+ folio = folio_alloc (root_activity);
+ object = 0;
+ }
+
+ struct object *o;
+ folio_object_alloc (root_activity, folio, object ++, type, &o);
+
+ struct as_insert_rt rt;
+ rt.cap = object_to_cap (o);
+ /* We don't need to set RT.STORAGE as as_insert doesn't require it
+ for the internal interface implementations. */
+ rt.storage = ADDR (0, 0);
+ return rt;
+}
+
+extern char _start;
+extern char _end;
+
+struct alloc
+{
+ addr_t addr;
+ int type;
+};
+
+static void
+try (struct alloc *allocs, int count, bool dump)
+{
+ struct cap aspace = { .type = cap_void };
+ struct cap caps[count];
+
+ void do_check (struct cap *cap, bool writable, int i, bool present)
+ {
+ if (present)
+ {
+ assert (cap);
+
+ assert (cap->type == caps[i].type);
+
+ struct object *object = cap_to_object (root_activity, cap);
+ struct object_desc *odesc = object_to_object_desc (object);
+ if (caps[i].type != cap_void)
+ assert (odesc->oid == caps[i].oid);
+
+ if (cap->type == cap_page)
+ assert (* (unsigned char *) object == i);
+ }
+ else
+ {
+ if (cap)
+ {
+ struct object *object = cap_to_object (root_activity, cap);
+ assert (! object);
+ assert (cap->type == cap_void);
+ }
+ }
+ }
+
+ int i;
+ for (i = 0; i < count; i ++)
+ {
+ switch (allocs[i].type)
+ {
+ case cap_folio:
+ caps[i] = object_to_cap ((struct object *)
+ folio_alloc (root_activity));
+ break;
+ case cap_void:
+ caps[i].type = cap_void;
+ break;
+ case cap_page:
+ case cap_rpage:
+ case cap_cappage:
+ case cap_rcappage:
+ caps[i] = allocate_object (allocs[i].type, allocs[i].addr).cap;
+ break;
+ default:
+ assert (! " Bad type");
+ }
+
+ struct object *object = cap_to_object (root_activity, &caps[i]);
+ if (caps[i].type == cap_page)
+ memset (object, i, PAGESIZE);
+
+ as_insert (root_activity, &aspace, allocs[i].addr,
+ object_to_cap (object), ADDR_VOID,
+ allocate_object);
+
+ if (dump)
+ {
+ printf ("After inserting: " ADDR_FMT "\n",
+ ADDR_PRINTF (allocs[i].addr));
+ as_dump_from (root_activity, &aspace, NULL);
+ }
+
+ int j;
+ for (j = 0; j < count; j ++)
+ {
+ bool writable;
+ struct cap *cap = slot_lookup_rel (root_activity,
+ &aspace, allocs[j].addr, -1,
+ &writable);
+ do_check (cap, writable, j, j <= i);
+
+ struct cap c;
+ c = object_lookup_rel (root_activity,
+ &aspace, allocs[j].addr, -1,
+ &writable);
+ do_check (&c, writable, j, j <= i);
+ }
+ }
+
+ /* Free the allocated objects. */
+ for (i = 0; i < count; i ++)
+ {
+ /* Make sure allocs[i].addr maps to PAGES[i]. */
+ bool writable;
+ struct cap *cap = slot_lookup_rel (root_activity,
+ &aspace, allocs[i].addr, -1,
+ &writable);
+ do_check (cap, writable, i, true);
+
+ struct cap c = object_lookup_rel (root_activity,
+ &aspace, allocs[i].addr, -1,
+ &writable);
+ do_check (&c, writable, i, true);
+
+ /* Void the capability in the returned capability slot. */
+ cap->type = cap_void;
+
+ /* The page should no longer be found. */
+ c = object_lookup_rel (root_activity, &aspace, allocs[i].addr, -1,
+ NULL);
+ assert (c.type == cap_void);
+
+ /* Restore the capability slot. */
+ cap->type = allocs[i].type;
+
+ /* The page should be back. */
+ cap = slot_lookup_rel (root_activity,
+ &aspace, allocs[i].addr, -1, &writable);
+ do_check (cap, writable, i, true);
+
+ c = object_lookup_rel (root_activity,
+ &aspace, allocs[i].addr, -1, &writable);
+ do_check (&c, writable, i, true);
+
+ /* Finally, free the object. */
+ switch (caps[i].type)
+ {
+ case cap_folio:
+ folio_free (root_activity,
+ (struct folio *) cap_to_object (root_activity,
+ &caps[i]));
+ break;
+ case cap_void:
+ break;
+ default:
+ object_free (root_activity, cap_to_object (root_activity, &caps[i]));
+ break;
+ }
+
+ /* Check the state of all pages. */
+ int j;
+ for (j = 0; j < count; j ++)
+ {
+ bool writable;
+ cap = slot_lookup_rel (root_activity,
+ &aspace, allocs[j].addr, -1, &writable);
+ /* We should always get the slot (but it won't always
+ designate an object). */
+ assert (cap);
+
+ struct cap c;
+ c = object_lookup_rel (root_activity,
+ &aspace, allocs[j].addr, -1, &writable);
+ do_check (&c, writable, j, i < j);
+ }
+ }
+}
+
+void
+test (void)
+{
+ struct cap *cap = NULL;
+
+ if (! memory_reserve ((l4_word_t) &_start, (l4_word_t) &_end,
+ memory_reservation_self))
+ panic ("Failed to reserve memory for self.");
+
+ memory_grab ();
+ object_init ();
+
+ /* Create the root activity. */
+ folio = folio_alloc (NULL);
+ if (! folio)
+ panic ("Failed to allocate storage for the initial task!");
+
+ struct cap c = allocate_object (cap_activity, ADDR_VOID).cap;
+ root_activity = (struct activity *) cap_to_object (root_activity, &c);
+
+ folio_reparent (root_activity, folio, root_activity);
+
+ {
+ printf ("Checking slot_lookup_rel... ");
+
+ /* We have an empty address space. When we use slot_lookup_rel
+ and specify that we don't care what type of capability we get,
+ we should get the capability slot--if the guard is right. */
+ struct cap aspace = { type: cap_void };
+
+ l4_word_t addr = 0xFA000;
+ bool writable;
+ cap = slot_lookup_rel (root_activity, &aspace, ADDR (addr, ADDR_BITS),
+ -1, &writable);
+ assert (cap == NULL);
+
+ /* Set the root to designate ADDR. */
+ bool r = CAP_SET_GUARD (&aspace, addr, ADDR_BITS);
+ assert (r);
+
+ cap = slot_lookup_rel (root_activity, &aspace, ADDR (addr, ADDR_BITS),
+ -1, &writable);
+ assert (cap == &aspace);
+ assert (writable);
+
+ printf ("ok.\n");
+ }
+
+ printf ("Checking as_insert... ");
+ {
+ struct alloc allocs[] =
+ { { ADDR (1 << (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2),
+ ADDR_BITS - FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2), cap_folio },
+ { ADDR (0x100000003, 63), cap_page },
+ { ADDR (0x100000004, 63), cap_page },
+ { ADDR (0x1000 /* 4k. */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x00100000 /* 1MB */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x01000000 /* 16MB */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x10000000 /* 256MB */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40000000 /* 1000MB */, ADDR_BITS - PAGESIZE_LOG2),
+ cap_page },
+ { ADDR (0x40000000 - 0x2000 /* 1000MB - 4k */,
+ ADDR_BITS - PAGESIZE_LOG2),
+ cap_page },
+ { ADDR (0x40001000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40003000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40002000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40009000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40008000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40007000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x40006000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
+ { ADDR (0x00101000 /* 1MB + 4k. */, ADDR_BITS - PAGESIZE_LOG2),
+ cap_page },
+ { ADDR (0x00FF0000 /* 1MB - 4k. */, ADDR_BITS - PAGESIZE_LOG2),
+ cap_page },
+ };
+
+ try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
+ }
+
+ {
+ struct alloc allocs[] =
+ { { ADDR (1, ADDR_BITS), cap_page },
+ { ADDR (2, ADDR_BITS), cap_page },
+ { ADDR (3, ADDR_BITS), cap_page },
+ { ADDR (4, ADDR_BITS), cap_page },
+ { ADDR (5, ADDR_BITS), cap_page },
+ { ADDR (6, ADDR_BITS), cap_page },
+ { ADDR (7, ADDR_BITS), cap_page },
+ { ADDR (8, ADDR_BITS), cap_page }
+ };
+
+ try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
+ }
+
+ {
+ /* Induce a long different guard. */
+ struct alloc allocs[] =
+ { { ADDR (0x100000000, 51), cap_cappage },
+ { ADDR (0x80000, 44), cap_folio }
+ };
+
+ try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
+ }
+
+ {
+ /* Induce subpage allocation. */
+ struct alloc allocs[] =
+ { { ADDR (0x80000, 44), cap_folio },
+ { ADDR (0x1000, 51), cap_page },
+ { ADDR (0x10000, 51), cap_page },
+ { ADDR (0x2000, 51), cap_page }
+ };
+
+ try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
+ }
+
+ printf ("ok.\n");
+}
diff --git a/viengoos/t-environment.h b/viengoos/t-environment.h
new file mode 100644
index 0000000..87175aa
--- /dev/null
+++ b/viengoos/t-environment.h
@@ -0,0 +1,284 @@
+/* t-environment.h - Setup test environment.
+ */
+
+/* This file is divided into two sections: common declarations and
+ start up code.
+
+ The former section creates the testing environment that influences
+ every file. By including this file using the CPP -include
+ argument, most files shouldn't have to be changed to be made aware
+ of the testing environment; they will just compile. For
+ functionality that does need to be changed, this file also defines
+ the _L4_TEST_ENVIRONMENT macro. Code can test this to decide which
+ implementation to use.
+
+ The latter section consists of start up code and convenience code.
+ Whatever file is the mail file should first define the
+ _L4_TEST_MAIN macro and then include this file. The program then
+ need only define a test function. */
+
+#ifndef T_ENVIRONMENT_H
+#define T_ENVIRONMENT_H
+
+#include <hurd/stddef.h>
+#include <stdio.h>
+
+/* FIXME: We can not include stdlib.h, as this wants to suck in the
+ whole libl4 headers via pthread (which currently fails as pthread
+ doesn't include the header files). Ouch! */
+extern char *getenv (const char *name);
+extern void exit (int status) __attribute__ ((__noreturn__));
+
+
+/* A type that behaves like char * alias-wise, but has the width of
+ the system word size. */
+#ifdef __i386__
+typedef unsigned int __attribute__((__mode__ (__SI__))) word_t;
+typedef word_t __attribute__((__may_alias__)) word_ma_t;
+#else
+#error not ported to this architecture
+#endif
+
+/* Our kernel interface page. */
+#ifdef __i386__
+static const word_ma_t environment_kip[] =
+ {
+ /* 0x0000 */ 0x4be6344c, 0x84050000, 0x00000000, 0x00000140,
+ /* 0x0010 */ 0x0014fab0, 0xf0129720, 0x00000000, 0x00000000,
+ /* 0x0020 */ 0x00000000, 0x00041c70, 0x00040000, 0x000483a0,
+ /* 0x0030 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /* 0x0040 */ 0x00000000, 0x00300000, 0x00300000, 0x0030ba90,
+ /* 0x0050 */ 0x00000000, 0x01d00007, 0x00000000, 0x00000000,
+ /* 0x0060 */ 0x00000000, 0x00000000, 0x00100200, 0x0014f000,
+ /* 0x0070 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /* 0x0080 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /* 0x0090 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /* 0x00a0 */ 0x00000000, 0x00000000, 0x000c2401, 0x0000000c,
+ /* 0x00b0 */ 0x00000000, 0x00000000, 0x00032600, 0x00000120,
+ /* 0x00c0 */ 0x00000000, 0x03001011, 0x00401006, 0x40000001,
+ /* 0x00d0 */ 0x00000910, 0x000008e0, 0x00000930, 0x00000940,
+ /* 0x00e0 */ 0x00000800, 0x00000830, 0x000008d0, 0x00000860,
+ /* 0x00f0 */ 0x00000870, 0x000008b0, 0x000008c0, 0x00000000,
+
+ /* 0x0100 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /* 0x0110 */ 0x00000950, 0x00000000, 0x00000000, 0x00000000,
+ /* 0x0120 */ 0x00000000, 0x002addde, 0x00000000, 0x00000000,
+ /* 0x0130 */ 0x00000000, 0x002adddf, 0x00000000, 0x00000000,
+ /* 0x0140 */ 0x04020000, 0x00000a36, 0x00040000, 0x20614b55,
+ /* 0x0150 */ 0x614b344c, 0x69503a3a, 0x63617473, 0x206f6968,
+ /* 0x0160 */ 0x7562202d, 0x20746c69, 0x4a206e6f, 0x32206e61,
+ /* 0x0170 */ 0x30322032, 0x30203530, 0x36323a32, 0x2034313a,
+ /* 0x0180 */ 0x6d207962, 0x75637261, 0x6c754073, 0x65737379,
+ /* 0x0190 */ 0x73752073, 0x20676e69, 0x20636367, 0x73726576,
+ /* 0x01a0 */ 0x206e6f69, 0x2e332e33, 0x44282034, 0x61696265,
+ /* 0x01b0 */ 0x3a31206e, 0x2e332e33, 0x33312d34, 0x6d730029,
+ /* 0x01c0 */ 0x736c6c61, 0x65636170, 0x00000073, 0x00000000,
+ /* 0x01d0 */ 0x00000004, 0xfffffc00, 0x00000001, 0x0009f800,
+ /* 0x01e0 */ 0x00100001, 0x07fffc00, 0x000a0004, 0x000efc00,
+ /* 0x01f0 */ 0x07000002, 0x08000000, 0x00000201, 0xbffffc00,
+ /* 0x0200 */ 0x00100002, 0x0014ec00, 0x00000000, 0x00000000
+
+ /* The rest in the real KIP are 0x00, until offset 0x800, which
+ contains the system call stubs. */
+ };
+
+static word_t __attribute__((__unused__)) environment_api_version = 0x84050000;
+static word_t __attribute__((__unused__)) environment_api_flags = 0x00000000;
+static word_t __attribute__((__unused__)) environment_kernel_id = 0x04020000;
+
+/* 64 MRs forwards, 16 UTCB words and 33 BRs backwards. */
+static word_t environment_utcb[64 + 16 + 33];
+static word_t *environment_utcb_address = &environment_utcb[33 + 16];
+#else
+#error not ported to this architecture
+#endif
+
+
+#ifdef _L4_TEST_EXTERNAL_LIBL4
+
+#warning Add support for your external libl4 here.
+
+/* Only check the official interface. */
+#undef _L4_INTERFACE_INTERN
+#undef _L4_INTERFACE_GNU
+#define _L4_INTERFACE_L4 1
+
+#else /* _L4_TEST_EXTERNAL_LIBL4 */
+
+/* This signals to libl4 that we are running in a fake test
+ environment. */
+#define _L4_TEST_ENVIRONMENT 1
+
+/* Our implementation of the kernel interface system call. */
+#define _L4_TEST_KERNEL_INTERFACE_IMPL \
+ *api_version = environment_api_version; \
+ *api_flags = environment_api_flags; \
+ *kernel_id = environment_kernel_id; \
+ return (_L4_kip_t) environment_kip;
+
+/* Our implementation of the "get utcb address" function. */
+#define _L4_TEST_UTCB_IMPL \
+ return (_L4_word_t *) environment_utcb_address;
+
+/* Enable all interfaces. */
+#define _L4_INTERFACE_INTERN 1
+#define _L4_INTERFACE_L4 1
+#define _L4_INTERFACE_GNU 1
+
+#include <l4/features.h>
+
+/* If you want to test if you wrote the tests for the various
+   interfaces independently of each other, enable one of these. */
+#if 0
+/* Only the official interface. */
+#undef _L4_INTERFACE_INTERN
+#undef _L4_INTERFACE_GNU
+#elif 0
+/* Only the GNU interface. */
+#undef _L4_INTERFACE_INTERN
+#undef _L4_INTERFACE_L4
+#elif 0
+/* Only the internal interface. */
+#undef _L4_INTERFACE_GNU
+#undef _L4_INTERFACE_L4
+#endif
+
+#endif /* _L4_TEST_EXTERNAL_LIBL4 */
+
+
+#ifdef _L4_INTERFACE_GNU
+
+/* Include the global variables that need to be available in every
+ program. They are initialized by INIT. */
+#include <l4/globals.h>
+
+#endif /* _L4_INTERFACE_GNU */
+#endif /* T_ENVIRONMENT_H */
+
+#ifdef _L4_TEST_MAIN
+#include <string.h>
+#include <l4/stubs.h>
+
+const char program_name[100];
+
+/* Be verbose. */
+static int opt_verbose;
+
+/* Do not exit if errors occur. */
+static int opt_keep_going;
+
+
+/* True if a check failed. */
+static int failed;
+
+
+/* Initialize the fake environment. */
+static inline void
+environment_init (int argc, char *argv[])
+{
+  strncpy ((char *) program_name, argv[0], sizeof (program_name) - 1);
+
+  int i;
+
+#if _L4_INTERFACE_GNU
+  __l4_kip = (_L4_kip_t) environment_kip;
+#endif
+
+  for (i = 0; i < argc; i++)
+    {
+      char *arg;
+
+      if (i == 0)
+	{
+	  arg = getenv ("TESTOPTS");
+	  if (!arg)
+	    continue;
+	}
+      else
+	{
+	  arg = argv[i];
+
+	  if (arg[0] != '-')
+	    continue;
+	  arg++;
+	}
+
+      while (*arg)
+	{
+	  switch (*arg)
+	    {
+	    case 'v':
+	      opt_verbose = 1;
+	      break;
+
+	    case 'k':
+	      opt_keep_going = 1;
+	      break;
+
+	    default:
+	      fprintf (stderr, "%s: warning: ignoring unknown option -%c\n",
+		       argv[0], *arg);
+	      break;
+	    }
+	  arg++;
+	}
+    }
+}
+
+
+/* Support macros. */
+
+#include <stdio.h>
+
+#define check(prefix,msg,cond,...) \
+ do \
+ { \
+ if (opt_verbose) \
+ printf ("%s Checking %s... ", prefix, msg); \
+ if (cond) \
+ { \
+ if (opt_verbose) \
+ printf ("OK\n"); \
+ } \
+ else \
+ { \
+ if (opt_verbose) \
+ printf ("failed\n"); \
+ fprintf (stderr, "FAIL: %s ", prefix); \
+ fprintf (stderr, __VA_ARGS__); \
+ fprintf (stderr, "\n"); \
+ failed = 1; \
+ if (!opt_keep_going) \
+ exit (1); \
+ } \
+ } \
+ while (0)
+
+#define check_nr(prefix,msg,val1,val2) \
+ do \
+ { \
+ typeof(val1) v1 = (val1); \
+ typeof(val2) v2 = (val2); \
+ \
+ check (prefix, msg, (v1 == v2), #val1 " == 0x%x != 0x%x", \
+ v1, v2); \
+ } \
+ while (0)
+
+
+void test (void);
+
+
+int
+main (int argc, char *argv[])
+{
+ /* Initialize the test environment. */
+ environment_init (argc, argv);
+
+ output_init ("stdio");
+
+ test ();
+
+ return failed ? 1 : 0;
+}
+#endif /* _L4_TEST_MAIN */
diff --git a/viengoos/thread.c b/viengoos/thread.c
new file mode 100644
index 0000000..e138f91
--- /dev/null
+++ b/viengoos/thread.c
@@ -0,0 +1,261 @@
+/* thread.c - Thread object implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <l4.h>
+#include <l4/thread-start.h>
+#include <hurd/ihash.h>
+#include <hurd/cap.h>
+#include <hurd/stddef.h>
+#include <bit-array.h>
+
+#include "cap.h"
+#include "object.h"
+#include "thread.h"
+#include "activity.h"
+
+/* Number of user thread ids; parenthesized so THREAD_IDS_MAX / 8 works. */
+#ifndef THREAD_IDS_MAX
+# define THREAD_IDS_MAX (1 << 16)
+#endif
+
+/* Thread ids that are in use. We use one bit per thread id. */
+/* XXX: We need to save what thread ids are allocated on backing
+ store. In fact, we need to save pointers to the thread objects as
+ well so that we can restart all the threads when the system
+ restarts. See comment below for some problems. */
+static unsigned char thread_ids[THREAD_IDS_MAX / 8];
+/* The next thread id to consider for allocation. */
+static int thread_id_next;
+
+#define THREAD_ID_BASE (l4_thread_user_base () + 10)
+
+/* When a client invokes us, we need to be able to find its
+ corresponding thread structure. XXX: This hash is marginally
+ problematic the memory requirement is dynamic. In a kernel that
+ supported an end point abstraction with protected payload, we
+ wouldn't need this. */
+static struct hurd_ihash tid_to_thread
+ = HURD_IHASH_INITIALIZER (HURD_IHASH_NO_LOCP);
+
+struct thread *
+thread_lookup (l4_thread_id_t threadid)
+{
+ int tid = l4_thread_no (threadid);
+ struct thread *thread = hurd_ihash_find (&tid_to_thread, tid);
+ if (! thread)
+ debug (1, "(%x.%x) => NULL", tid, l4_version (threadid));
+ return thread;
+}
+
+void
+thread_create_in (struct activity *activity,
+		  struct thread *thread)
+{
+  /* Allocate a thread id. */
+  /* Find the next free thread id starting at thread_id_next.  Record
+     the resume hint before biasing TID: it indexes the bit array. */
+  int tid = bit_alloc (thread_ids, THREAD_IDS_MAX / 8, thread_id_next);
+  if (tid == -1)
+    panic ("No thread ids left!");
+  thread_id_next = (tid + 1) % THREAD_IDS_MAX;
+  tid += THREAD_ID_BASE;
+  debug (2, "Allocated thread id 0x%x", tid);
+  /* We don't assign any semantic meaning to the version field.  We
+     use a version of 1 (0 is not legal for global thread ids). */
+  thread->tid = l4_global_id (tid, 1);
+
+  /* Set the initial activity to ACTIVITY. */
+  thread->activity = object_to_cap ((struct object *) activity);
+
+  bool had_value;
+  error_t err = hurd_ihash_replace (&tid_to_thread, tid, thread,
+				    &had_value, NULL);
+  assert (err == 0);
+  assert (had_value == false);
+}
+
+error_t
+thread_create (struct activity *activity,
+ struct thread *caller,
+ addr_t faddr, l4_word_t index,
+ addr_t taddr,
+ struct thread **threadp)
+{
+ if (! (0 <= index && index < FOLIO_OBJECTS))
+ return EINVAL;
+
+ /* Find the folio to use. */
+ struct cap folio_cap = object_lookup_rel (activity, &caller->aspace,
+ faddr, cap_folio, NULL);
+ if (folio_cap.type == cap_void)
+ return ENOENT;
+ struct object *folio = cap_to_object (activity, &folio_cap);
+ if (! folio)
+ return ENOENT;
+
+ /* And the thread capability slot. */
+ bool writable;
+ struct cap *tcap = slot_lookup_rel (activity, &caller->aspace, taddr,
+ -1, &writable);
+ if (! tcap)
+ {
+ debug (1, "No capability at 0x%llx/%d",
+ addr_prefix (taddr), addr_depth (taddr));
+ return ENOENT;
+ }
+ if (! writable)
+ {
+ debug (1, "No permission to store at 0x%llx/%d",
+ addr_prefix (taddr), addr_depth (taddr));
+ return EPERM;
+ }
+
+ /* Allocate the page from the folio. */
+ struct object *o;
+ folio_object_alloc (activity, (struct folio *) folio, index,
+ cap_thread, &o);
+ struct thread *thread;
+ *threadp = thread = (struct thread *) o;
+
+ thread_create_in (activity, thread);
+ return 0;
+}
+
+
+void
+thread_destroy (struct activity *activity, struct thread *thread)
+{
+  /* Free the thread id.  The bit array and the hash are keyed by the
+     thread *number* (cf. thread_create_in), not the full global id. */
+  bit_dealloc (thread_ids,
+	       l4_thread_no (thread->tid) - THREAD_ID_BASE);
+
+  int removed = hurd_ihash_remove (&tid_to_thread, l4_thread_no (thread->tid));
+  assert (removed);
+
+  object_free (activity, (struct object *) thread);
+}
+
+/* FIXME:
+
+ Saving and restoring register state.
+
+ Here's the plan: when a thread is decommissioned, we do a space
+ control and bring the thread into our own address space. Then we
+ do an exregs to get its register state. We need then need to set
+ the thread running to grab its floating point state. Also, we need
+ to copy the utcb. We can do the same in reverse when commissioning
+ a thread.
+
+ There is one complication: if a thread is targetting by an IPC,
+ then we don't get a fault! The simpliest solution appears to be to
+ keep the kernel state around. And when the system starts to
+ restart all threads. (Just restarting threads in the receive phase
+ is not enough: when another thread does a send, the thread should
+ block.) */
+
+void
+thread_commission (struct thread *thread)
+{
+ assert (! thread->commissioned);
+
+ /* Create the thread. */
+ l4_word_t ret;
+ ret = l4_thread_control (thread->tid, thread->tid,
+ l4_myself (), l4_nilthread, (void *) -1);
+ if (! ret)
+ panic ("Could not create initial thread (id=%x.%x): %s",
+ l4_thread_no (thread->tid), l4_version (thread->tid),
+ l4_strerror (l4_error_code ()));
+
+ l4_word_t control;
+ ret = l4_space_control (thread->tid, l4_nilthread,
+ l4_fpage_log2 (KIP_BASE,
+ l4_kip_area_size_log2 ()),
+ l4_fpage (UTCB_AREA_BASE, UTCB_AREA_SIZE),
+ l4_anythread, &control);
+ if (! ret)
+ panic ("Could not create address space: %s",
+ l4_strerror (l4_error_code ()));
+
+ ret = l4_thread_control (thread->tid, thread->tid,
+ l4_nilthread,
+ l4_myself (),
+ (void *) UTCB_AREA_BASE);
+ if (! ret)
+ panic ("Failed to create thread: %s", l4_strerror (l4_error_code ()));
+
+ /* XXX: Restore the register state! (See comment above for the
+ plan.) */
+
+ /* Start the thread. */
+ if (thread->sp || thread->ip)
+ {
+ l4_word_t ret = l4_thread_start (thread->tid, thread->sp, thread->ip);
+ assert (ret == 1);
+ }
+
+ thread->commissioned = 1;
+}
+
+void
+thread_decommission (struct thread *thread)
+{
+ assert (thread->commissioned);
+
+ /* XXX: Save the register state! (See comment above for the
+ plan.) */
+
+ /* Free the thread. */
+ l4_word_t ret;
+ ret = l4_thread_control (thread->tid, l4_nilthread,
+ l4_nilthread, l4_nilthread, (void *) -1);
+ if (! ret)
+ panic ("Failed to delete thread %d",
+ l4_thread_no (thread->tid));
+
+ thread->commissioned = 0;
+}
+
+error_t
+thread_send_sp_ip (struct activity *activity,
+ struct thread *caller, addr_t taddr,
+ l4_word_t sp, l4_word_t ip)
+{
+ struct cap cap = object_lookup_rel (activity, &caller->aspace, taddr,
+ cap_thread, NULL);
+ if (cap.type == cap_void)
+ return ENOENT;
+ struct object *object = cap_to_object (activity, &cap);
+ if (! object)
+ return ENOENT;
+ struct thread *thread = (struct thread *) object;
+
+ /* After this point nothing may block or fail (of course, user
+ errors are okay). */
+
+ if (thread->commissioned)
+ return EINVAL;
+
+ thread->sp = sp;
+ thread->ip = ip;
+
+ thread_commission (thread);
+
+ return 0;
+}
diff --git a/viengoos/thread.h b/viengoos/thread.h
new file mode 100644
index 0000000..a8849fa
--- /dev/null
+++ b/viengoos/thread.h
@@ -0,0 +1,110 @@
+/* thread.h - Thread object interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RM_THREAD_H
+#define RM_THREAD_H
+
+#include <l4.h>
+#include <errno.h>
+
+#include "cap.h"
+
+/* Forward. */
+struct folio;
+struct activity;
+
+/* Number of capability slots at the start of the thread
+ structure. */
+enum
+ {
+ THREAD_SLOTS = 2,
+ };
+
+struct thread
+{
+ /* Address space. */
+ struct cap aspace;
+
+ /* The current associated activity. (Not the activity out of which
+ this thread's storage is allocated!) */
+ struct cap activity;
+
+ /* Allocated thread id. */
+ l4_thread_id_t tid;
+
+ /* XXX: Register state, blah, blah, blah. */
+ l4_word_t sp;
+ l4_word_t ip;
+
+ /* Debugging: whether the thread has been commissioned. */
+ int commissioned;
+
+};
+
+/* The hardwired base of the UTCB (2.5GB). */
+#define UTCB_AREA_BASE (0xA0000000)
+/* The size of the UTCB. */
+#define UTCB_AREA_SIZE (l4_utcb_area_size ())
+/* The hardwired base of the KIP. */
+#define KIP_BASE (UTCB_AREA_BASE + UTCB_AREA_SIZE)
+
+/* Create a new thread. Uses the object THREAD to store the thread
+ information. */
+extern void thread_create_in (struct activity *activity,
+ struct thread *thread);
+
+/* Create a new thread. FOLIO designates a folio in CALLER's CSPACE.
+ INDEX specifies which object in the folio to use for the new
+ thread's storage. Sets the thread's current activity to ACTIVITY.
+ On success, a capability to this object is saved in the capability
+ slot at address THREAD in CALLER's address space, the thread object
+ is returned in *THREADP and 0 is returned. Otherwise, an error
+ code. */
+extern error_t thread_create (struct activity *activity,
+ struct thread *caller,
+ addr_t folio, l4_word_t index,
+ addr_t thread,
+ struct thread **threadp);
+
+/* Destroy the thread object THREAD (and the accompanying thread). */
+extern void thread_destroy (struct activity *activity,
+ struct thread *thread);
+
+/* Prepare the thread object THREAD to run. (Called after bringing a
+ thread object into core.) */
+extern void thread_commission (struct thread *thread);
+
+/* Save any state of the thread THREAD and destroy any ephemeral
+ resources. (Called before sending the object to backing
+ store.) */
+extern void thread_decommission (struct thread *thread);
+
+/* Send a thread start message to the thread THREAD (in CALLER's
+ address space). This may be called at most once per thread. If
+ called multiple times, the results are undefined. If thread is not
+ decommissioned, returns EINVAL. Commissions thread. */
+extern error_t thread_send_sp_ip (struct activity *activity,
+ struct thread *caller, addr_t thread,
+ l4_word_t sp, l4_word_t ip);
+
+/* Given the L4 thread id THREADID, find the associated thread. */
+extern struct thread *thread_lookup (l4_thread_id_t threadid);
+
+#endif
diff --git a/viengoos/viengoos.c b/viengoos/viengoos.c
new file mode 100644
index 0000000..99c781b
--- /dev/null
+++ b/viengoos/viengoos.c
@@ -0,0 +1,449 @@
+/* viengoos.c - Main file for viengoos.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <assert.h>
+#include <unistd.h>
+#include <alloca.h>
+#include <string.h>
+
+#include <l4/thread-start.h>
+#include <l4/pagefault.h>
+
+#include <hurd/startup.h>
+#include <hurd/stddef.h>
+
+#include "viengoos.h"
+#include "sigma0.h"
+#include "memory.h"
+#include "boot-modules.h"
+#include "loader.h"
+#include "cap.h"
+#include "object.h"
+#include "activity.h"
+#include "thread.h"
+#include "as.h"
+#include "server.h"
+#include "shutdown.h"
+#include "output.h"
+
+
+#define BUG_ADDRESS "<bug-hurd@gnu.org>"
+
+struct activity *root_activity;
+
+/* The program name. */
+const char program_name[] = "viengoos";
+
+/* The following must be defined and are used to calculate the extents
+ of the laden binary itself. _END is one more than the address of
+ the last byte. */
+extern char _start;
+extern char _end;
+
+
+static void
+parse_args (int argc, char *argv[])
+{
+  int i = 1;
+
+  while (i < argc)
+    {
+      if (!strcmp (argv[i], "--usage"))
+	{
+	  i++;
+	  printf ("Usage %s [OPTION...]\n", argv[0]);
+	  printf ("Try `%s --help' for more information\n", program_name);
+	  shutdown_machine ();
+	}
+      else if (!strcmp (argv[i], "--help"))
+	{
+	  struct output_driver **drv = output_drivers;
+
+	  i++;
+	  printf ("Usage: %s [OPTION...]\n"
+		  "\n"
+		  "Boot the Hurd system and wrap the L4 privileged system "
+		  "calls.\n"
+		  "\n"
+		  "  -o, --output DRV  use output driver DRV\n"
+		  "  -D, --debug LEVEL enable debug output (1-5)\n"
+		  "  -h, --halt        halt the system at error (default)\n"
+		  "  -r, --reboot      reboot the system at error\n"
+		  "\n"
+		  "      --usage       print out some usage information and "
+		  "exit\n"
+		  "      --help        display this help and exit\n"
+		  "      --version     output version information and exit\n"
+		  "\n", argv[0]);
+
+	  printf ("Valid output drivers are: ");
+	  while (*drv)
+	    {
+	      printf ("%s", (*drv)->name);
+	      if (drv == output_drivers)
+		printf (" (default)");
+	      drv++;
+	      if (*drv && (*drv)->name)
+		printf (", ");
+	      else
+		printf (".\n\n");
+	    }
+
+	  printf ("Report bugs to " BUG_ADDRESS ".\n");
+	  shutdown_machine ();
+	}
+      else if (!strcmp (argv[i], "--version"))
+	{
+	  i++;
+	  printf ("%s " PACKAGE_VERSION "\n", program_name);
+	  shutdown_machine ();
+	}
+      else if (!strcmp (argv[i], "-o") || !strcmp (argv[i], "--output"))
+	{
+	  i++;
+	  if (!output_init (argv[i]))
+	    panic ("Unknown output driver %s", argv[i]);
+	  i++;
+	}
+      else if (!strcmp (argv[i], "-h") || !strcmp (argv[i], "--halt"))
+	{
+	  i++;
+	  shutdown_reset = 0;
+	}
+      else if (!strcmp (argv[i], "-r") || !strcmp (argv[i], "--reboot") || !strcmp (argv[i], "--reset"))
+	{
+	  i++;
+	  shutdown_reset = 1;
+	}
+      else if (!strcmp (argv[i], "-D") || !strcmp (argv[i], "--debug"))
+	{
+	  i++;
+	  if (i == argc || ! ('0' <= argv[i][0] && argv[i][0] <= '9'))
+	    panic ("Option -D expects an integer argument");
+	  output_debug = atoi (argv[i]);
+	  i++;
+	}
+      else if (argv[i][0] == '-')
+	panic ("Unsupported option %s", argv[i]);
+      else
+	panic ("Invalid non-option argument %s", argv[i]);
+    }
+}
+
+static void
+memory_configure (void)
+{
+ /* Reserve their memory. */
+ int i;
+ for (i = 0; i < boot_module_count; i ++)
+ {
+ if (! memory_reserve (boot_modules[i].start, boot_modules[i].end,
+ i == 0 ? memory_reservation_system_executable
+ : memory_reservation_modules))
+ panic ("Failed to reserve memory for boot module %d (%s).",
+ i, boot_modules[i].command_line);
+ if (boot_modules[i].command_line
+ && ! memory_reserve ((l4_word_t) boot_modules[i].command_line,
+ (l4_word_t) boot_modules[i].command_line
+ + strlen (boot_modules[i].command_line),
+ i == 0 ? memory_reservation_system_executable
+ : memory_reservation_modules))
+ panic ("Failed to reserve memory for boot module %d's "
+ "command line (%s).",
+ i, boot_modules[i].command_line);
+ }
+
+ /* Grab all available physical memory. */
+ if (3 < output_debug)
+ memory_reserve_dump ();
+ memory_grab ();
+
+ printf ("memory: %x-%x\n", first_frame, last_frame);
+}
+
+void
+system_task_load (void)
+{
+ struct hurd_startup_data *startup_data = (void *) memory_frame_allocate ();
+
+ bool boot_strapped = false;
+
+ struct thread *thread;
+
+ /* The area where we will store the hurd object descriptors won't be
+ ready until after we have already allocated some objects. We
+ allocate a few descriptors, which should be more than enough. */
+ struct hurd_object_desc *descs = (void *) &startup_data[1];
+ int desc_max = ((PAGESIZE - sizeof (struct hurd_startup_data))
+ / sizeof (struct hurd_object_desc));
+ struct object *objects[desc_max];
+ int desc_count = 0;
+
+ struct folio *folio = NULL;
+ int folio_index;
+ addr_t folio_addr;
+
+ struct as_insert_rt allocate_object (enum cap_type type, addr_t addr)
+ {
+ debug (4, "(%s, 0x%llx/%d)",
+ cap_type_string (type), addr_prefix (addr), addr_depth (addr));
+
+ assert (type != cap_void);
+ assert (type != cap_folio);
+
+ if (! folio || folio_index == FOLIO_OBJECTS)
+ /* Allocate additional storage. */
+ {
+ static int f = 1;
+
+ folio = folio_alloc (root_activity);
+ folio_index = 0;
+
+ /* XXX: Allocate more space. */
+ if (desc_count == desc_max)
+ panic ("Out of object descriptors (binary too big)");
+ int i = desc_count ++;
+ struct hurd_object_desc *desc = &descs[i];
+ /* We allocate a folio such that pages allocated are
+ mappable in the data address space. */
+ folio_addr = desc->object = ADDR (f << (FOLIO_OBJECTS_LOG2
+ + PAGESIZE_LOG2),
+ ADDR_BITS - FOLIO_OBJECTS_LOG2
+ - PAGESIZE_LOG2);
+ f ++;
+ desc->type = cap_folio;
+
+ objects[i] = (struct object *) folio;
+
+ if (boot_strapped)
+ as_insert (root_activity, &thread->aspace, folio_addr,
+ object_to_cap ((struct object *) folio), ADDR_VOID,
+ allocate_object);
+ }
+
+ struct object *object;
+ int index = folio_index ++;
+ folio_object_alloc (root_activity, folio, index, type, &object);
+
+ if (! (desc_count < desc_max))
+ panic ("Initial task too large.");
+
+ int i = desc_count ++;
+ objects[i] = object;
+ struct hurd_object_desc *desc = &descs[i];
+
+ desc->object = addr;
+ desc->storage = addr_extend (folio_addr, index, FOLIO_OBJECTS_LOG2);
+ desc->type = type;
+
+ struct as_insert_rt rt;
+ rt.cap = object_to_cap (object);
+ rt.storage = desc->storage;
+ return rt;
+ }
+
+ /* When allocating objects, we allocate them above 4GB. */
+ l4_uint64_t next_free_page = 1ULL << 32;
+ addr_t capability_cappage = ADDR_VOID;
+ struct object *capability_cappage_object;
+ int capability_cappage_count;
+
+ /* Allocate a capability location. */
+ addr_t csalloc (void)
+ {
+ if (ADDR_IS_VOID (capability_cappage)
+ || capability_cappage_count == CAPPAGE_SLOTS)
+ {
+ capability_cappage = ADDR (next_free_page,
+ ADDR_BITS - PAGESIZE_LOG2);
+ next_free_page += PAGESIZE;
+
+ capability_cappage_count = 0;
+
+ struct cap cap
+ = allocate_object (cap_cappage, capability_cappage).cap;
+ struct object *object = cap_to_object (root_activity, &cap);
+ if (boot_strapped)
+ as_insert (root_activity, &thread->aspace, capability_cappage,
+ object_to_cap (object), ADDR_VOID, allocate_object);
+ else
+ capability_cappage_object = object;
+ }
+
+ return addr_extend (capability_cappage, capability_cappage_count ++,
+ CAPPAGE_SLOTS_LOG2);
+ }
+
+ /* XXX: Boostrap problem. To allocate a folio we need to assign it
+ to a principle, however, the representation of a principle
+ requires storage. Our solution is to allow a folio to be created
+ without specifying a resource principal, allocating a resource
+ principal and then assigning the folio to that resource
+ principal.
+
+ This isn't really a good solution as once we really go the
+ persistent route, there may be references to the data structures
+ in the persistent image. Moreover, the root activity data needs
+ to be saved.
+
+ A way around this problem would be the approach that EROS takes:
+ start with a hand-created system image. */
+ startup_data->activity = csalloc ();
+ struct cap cap = allocate_object (cap_activity, startup_data->activity).cap;
+ root_activity = (struct activity *) cap_to_object (root_activity, &cap);
+ folio_reparent (root_activity, folio, root_activity);
+
+ startup_data->thread = csalloc ();
+ cap = allocate_object (cap_thread, startup_data->thread).cap;
+ thread = (struct thread *) cap_to_object (root_activity, &cap);
+ thread_create_in (root_activity, thread);
+
+ /* Insert the objects we've allocated so far into TASK's address
+ space. */
+ boot_strapped = true;
+
+ as_insert (root_activity, &thread->aspace, capability_cappage,
+ object_to_cap (capability_cappage_object), ADDR_VOID,
+ allocate_object);
+ as_insert (root_activity, &thread->aspace, folio_addr,
+ object_to_cap ((struct object *) folio), ADDR_VOID,
+ allocate_object);
+
+ /* We insert the thread and activity under two difference names: one
+ reference for the hurd object descriptor and one for
+ STARTUP_DATA->NAME. */
+ as_insert (root_activity, &thread->aspace, startup_data->activity,
+ object_to_cap ((struct object *) root_activity), ADDR_VOID,
+ allocate_object);
+ startup_data->activity = csalloc ();
+ as_insert (root_activity, &thread->aspace, startup_data->activity,
+ object_to_cap ((struct object *) root_activity), ADDR_VOID,
+ allocate_object);
+
+ as_insert (root_activity, &thread->aspace, startup_data->thread,
+ object_to_cap ((struct object *) thread), ADDR_VOID,
+ allocate_object);
+ startup_data->thread = csalloc ();
+ as_insert (root_activity, &thread->aspace, startup_data->thread,
+ object_to_cap ((struct object *) thread), ADDR_VOID,
+ allocate_object);
+
+ /* Allocate the startup data object and copy the data from the
+ temporary page, updating any necessary pointers. */
+#define STARTUP_DATA_ADDR 0x1000
+ addr_t startup_data_addr = ADDR (STARTUP_DATA_ADDR,
+ ADDR_BITS - PAGESIZE_LOG2);
+ cap = allocate_object (cap_page, startup_data_addr).cap;
+ struct object *startup_data_page = cap_to_object (root_activity, &cap);
+ as_insert (root_activity, &thread->aspace, startup_data_addr,
+ object_to_cap (startup_data_page), ADDR_VOID, allocate_object);
+ memcpy (startup_data_page, startup_data, PAGESIZE);
+ /* Free the staging area. */
+ memory_frame_free ((l4_word_t) startup_data);
+ startup_data = (void *) startup_data_page;
+ descs = (void *) &startup_data[1];
+
+ startup_data = (struct hurd_startup_data *) startup_data_page;
+ startup_data->version_major = HURD_STARTUP_VERSION_MAJOR;
+ startup_data->version_minor = HURD_STARTUP_VERSION_MINOR;
+ startup_data->utcb_area = UTCB_AREA_BASE;
+ startup_data->rm = l4_myself ();
+ startup_data->descs
+ = (void *) STARTUP_DATA_ADDR + (sizeof (struct hurd_startup_data));
+
+ thread->sp = STARTUP_DATA_ADDR;
+
+ /* Load the binary. */
+ loader_elf_load (allocate_object, root_activity, thread,
+ "system", boot_modules[0].start, boot_modules[0].end,
+ &thread->ip);
+
+
+ /* Add the argument vector. If it would overflow the page, we
+ truncate it. */
+ startup_data->argz_len = strlen (boot_modules[0].command_line) + 1;
+
+ int offset = sizeof (struct hurd_startup_data)
+ + desc_count * sizeof (struct hurd_object_desc);
+ int space = PAGESIZE - offset;
+ if (space < startup_data->argz_len)
+ {
+ printf ("Truncating command line from %d to %d characters\n",
+ startup_data->argz_len, space);
+ startup_data->argz_len = space;
+ }
+ memcpy ((void *) startup_data + offset, boot_modules[0].command_line,
+ startup_data->argz_len - 1);
+ startup_data->argz = (void *) STARTUP_DATA_ADDR + offset;
+
+ startup_data->desc_count = desc_count;
+
+ /* Release the memory used by the binary. */
+ memory_reservation_clear (memory_reservation_system_executable);
+
+ if (2 < output_debug)
+ /* Dump the system task's address space before we start it
+ running. */
+ {
+ printf ("System task's AS\n");
+ as_dump_from (root_activity, &thread->aspace, "");
+ }
+
+ thread_commission (thread);
+ debug (1, "System task started (tid: %x.%x; ip=0x%x).",
+ l4_thread_no (thread->tid), l4_version (thread->tid), thread->ip);
+}
+
+int
+main (int argc, char *argv[])
+{
+ parse_args (argc, argv);
+
+ debug (1, "%s " PACKAGE_VERSION " (%x)", program_name, l4_my_global_id ());
+
+ /* Assert that the size of a cap is a power of 2. */
+ assert ((sizeof (struct cap) & (sizeof (struct cap) - 1)) == 0);
+
+ /* Reserve the rm binary. */
+ if (! memory_reserve ((l4_word_t) &_start, (l4_word_t) &_end,
+ memory_reservation_self))
+ panic ("Failed to reserve memory for self.");
+
+ /* Find the modules. */
+ find_components ();
+
+ memory_configure ();
+
+ object_init ();
+
+ /* Load the system task. */
+ system_task_load ();
+
+ /* And, start serving requests. */
+ server_loop ();
+
+ /* Should never return. */
+ panic ("server_loop returned!");
+ return 0;
+}
diff --git a/viengoos/viengoos.h b/viengoos/viengoos.h
new file mode 100644
index 0000000..e9fd3bf
--- /dev/null
+++ b/viengoos/viengoos.h
@@ -0,0 +1,40 @@
+/* viengoos.h - Private declarations for viengoos.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef VIENGOOS_H
+#define VIENGOOS_H
+
+#include <l4/types.h>
+
+/* The program name, set statically. */
+extern const char program_name[];
+
+/* A pointer to the root activity. */
+extern struct activity *root_activity;
+
+/* The arch-independent main function called by the arch dependent
+ code. */
+extern int main (int, char *[]);
+
+/* Find the kernel, the initial servers and the other information
+ required for booting. */
+extern void find_components (void);
+
+#endif /* VIENGOOS_H */
diff --git a/viengoos/zalloc.c b/viengoos/zalloc.c
new file mode 100644
index 0000000..0b49787
--- /dev/null
+++ b/viengoos/zalloc.c
@@ -0,0 +1,285 @@
+/* Zone allocator for physical memory server.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Neal H Walfield.
+ Modified by Marcus Brinkmann.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <assert.h>
+#include <string.h>
+#include <hurd/stddef.h>
+
+#include "zalloc.h"
+
+/* Zalloc: A fast zone allocator. This is not a general purpose
+ allocator. If you attempt to use it as such, you will find that it
+ is very inefficient. It is, however, designed to be very fast and
+ to be used as a base for building a more general purpose allocator.
+
+ Memory is kept in zones. Zones are of sizes 2 ** N and all memory
+ is aligned on a similar boundary. Typically, the smallest zone
+ will be the system page size. Memory of any size can be added to
+ the pool as long as it is a multiple of the smallest zone: it is
+ broken up as necessary.
+
+ Memory can be added to the pool by calling the zfree function with
+ the address of the buffer and its size. The buffer is broken up as
+ a function of its alignment and size using the buddy system (as
+ described by e.g. Knuth). Consider the following: zfree (4k, 16k).
+ This says that a buffer of size 16k starting at address 4k should
+ be added to the system. Although the size of the buffer is a power
+ of 2 (2 ** 14 = 16k), it cannot be added to the 16k zone: it has
+ the wrong alignment. Instead, the initial 4k are broken off, added
+ to the 4k zone, the next 8k to the 8k zone and the final 4k to the
+ 4k zone. If, as memory is added to a zone, its buddy is present,
+ the two buffers are buddied up and promoted to the next zone. For
+ instance, if the 4k buffer at address 20k was present during the
+ previous zfree, the buffer at 16k would have been combined with this
+ and the new larger buffer would have been added to the 8k zone.
+
+ When allocating memory, the smallest zone that is larger than or
+ equal to the desired size is selected. If the zone is exhausted,
+ the allocator will look in the next larger zone and break up a
+ buffer to satisfy the request. This continues recursively if
+ necessary. If the desired size is smaller than the buffer that is
+ selected, the difference is returned to the system. For instance,
+ if an allocation request of 12k is made, the system will start
+ looking in the 16k zone. If it finds that that zone is exhausted,
+ it will select a buffer from the 32k zone and place the top half in
+ the 16k zone and use the lower half for the allocation. However,
+ as this is 4k too much, the extra is returned to the 4k zone.
+
+ When making allocations, the system will not look for adjacent
+ memory blocks: if an allocation request of e.g. 8k is issued and
+ there is no memory in the 8k zones and above, the 4k zone will not
+ be searched for false buddies. That is, if in the 4k zone there is
+ a buffer starting at 4k and 8k, the allocator will make no effort
+ to search for them. Note that they could not have been combined
+ during the zfree as 4k's buddy is at 0k and 8k's buddy is at
+ 12k. */
+
+
+/* A free block list ordered by address. Blocks are of size 2 ** N
+ and aligned on a similar boundary. Since the contents of a block
+ does not matter (it is free), the block itself contains this
+ structure at its start address. */
+struct block
+{
+ struct block *next;
+ struct block *prev;
+};
+
+
+/* Given a zone, return its size in bytes: 2 ** (X + PAGESIZE_LOG2).
+ NOTE(review): the shift is done in plain int; for the largest zone
+ indices (X + PAGESIZE_LOG2 == word size - 1) it reaches the sign
+ bit -- confirm callers never evaluate it there except in the
+ zalloc_dump_zones loop bound. */
+#define ZONE_SIZE(x) (1 << ((x) + PAGESIZE_LOG2))
+
+/* Number of zones in the system: one per power of two from PAGESIZE
+ up to the machine word.  NOTE(review): uses L4_Word_t while the
+ rest of this file spells it l4_word_t -- confirm both names are
+ provided by the L4 headers. */
+#define ZONES (sizeof (L4_Word_t) * 8 - PAGESIZE_LOG2)
+
+/* The zones.  zone[N] heads a doubly-linked free list, ordered by
+ address, of blocks of size ZONE_SIZE (N). */
+static struct block *zone[ZONES] = { 0, };
+
+
+/* Add the block BLOCK to the zone ZONE_NR. The block has the
+ right size and alignment. Buddy up if possible: each time BLOCK
+ coalesces with its buddy, the combined block is promoted to the
+ next larger zone and the loop retries there. */
+static inline void
+add_block (struct block *block, unsigned int zone_nr)
+{
+ while (1)
+ {
+ struct block *left = 0;
+ struct block *right = zone[zone_nr];
+
+ /* Find the left and right neighbours of BLOCK.  The list is
+ sorted by address, so LEFT ends up as the last block below
+ BLOCK and RIGHT as the first block above it. */
+ while (right && block > right)
+ {
+ left = right;
+ right = right->next;
+ }
+
+ /* Two blocks of size ZONE_SIZE (zone_nr) are buddies exactly
+ when their addresses differ only in that one bit, i.e. when
+ the XOR of the addresses equals the zone size. */
+ if (left && (((l4_word_t) left) ^ ((l4_word_t) block))
+ == ZONE_SIZE (zone_nr))
+ {
+ /* Buddy on the left. */
+
+ /* Remove left neighbour. */
+ if (left->prev)
+ left->prev->next = left->next;
+ else
+ zone[zone_nr] = left->next;
+ if (left->next)
+ left->next->prev = left->prev;
+
+ /* The combined block starts at the lower address. */
+ block = left;
+ zone_nr++;
+ }
+ else if (right && (((l4_word_t) right) ^ ((l4_word_t) block))
+ == ZONE_SIZE (zone_nr))
+ {
+ /* Buddy on the right. */
+
+ /* Remove right neighbour from the list. */
+ if (right->prev)
+ right->prev->next = right->next;
+ else
+ zone[zone_nr] = right->next;
+ if (right->next)
+ right->next->prev = right->prev;
+
+ /* BLOCK is already the lower address; just promote. */
+ zone_nr++;
+ }
+ else
+ {
+ /* Could not coalesce. Just insert between LEFT and
+ RIGHT, keeping the list address-ordered. */
+
+ block->next = right;
+ if (block->next)
+ block->next->prev = block;
+
+ block->prev = left;
+ if (block->prev)
+ block->prev->next = block;
+ else
+ zone[zone_nr] = block;
+
+ /* This is the terminating case. */
+ break;
+ }
+ }
+}
+
+
+/* Add the block BLOCK of size SIZE to the pool. BLOCK must be
+ aligned to the system's minimum page size. SIZE must be a multiple
+ of the system's minimum page size.  The region is carved into
+ power-of-two chunks, each stored in the zone matching both its
+ size and its alignment (cf. the file comment above). */
+void
+zfree (l4_word_t block, l4_word_t size)
+{
+ debug (4, "freeing block 0x%x - 0x%x", block, block + size);
+
+ if (size & (PAGESIZE - 1))
+ panic ("%s: size 0x%x of freed block 0x%x is not a multiple of "
+ "minimum page size", __func__, size, block);
+
+ if (block & (PAGESIZE - 1))
+ panic ("%s: freed block 0x%x of size 0x%x is not aligned to "
+ "minimum page size", __func__, block, size);
+
+ do
+ {
+ /* All blocks must be stored aligned to their size.  l4_lsb
+ and l4_msb return 1-based bit indices (see the comment in
+ zalloc), hence the - 1: BLOCK_ALIGN is the largest N such
+ that BLOCK is 2**N aligned, SIZE_ALIGN is floor(log2(SIZE)).
+ The chunk is the smaller of the two, converted to a zone
+ number.  NOTE(review): assumes BLOCK != 0 -- l4_lsb (0)
+ would wrap; confirm callers never free address 0. */
+ unsigned int block_align = l4_lsb (block) - 1;
+ unsigned int size_align = l4_msb (size) - 1;
+ unsigned int zone_nr = (block_align < size_align
+ ? block_align : size_align) - PAGESIZE_LOG2;
+
+ add_block ((struct block *) block, zone_nr);
+
+ /* Advance past the chunk just donated and repeat on the rest. */
+ block += ZONE_SIZE (zone_nr);
+ size -= ZONE_SIZE (zone_nr);
+ }
+ while (size > 0);
+}
+
+
+/* Allocate a block of memory of size SIZE and return its address.
+ SIZE must be a multiple of the system's minimum page size. If no
+ block of the required size could be allocated, return 0.  The
+ returned memory is zero-filled. */
+l4_word_t
+zalloc (l4_word_t size)
+{
+ unsigned int zone_nr;
+ struct block *block;
+
+ debug (4, "request for 0x%x bytes", size);
+
+ if (size & (PAGESIZE - 1))
+ panic ("%s: requested size 0x%x is not a multiple of "
+ "minimum page size", __func__, size);
+
+ /* Calculate the logarithm to base two of SIZE rounded up to the
+ nearest power of two (actually, the MSB function returns one more
+ than the logarithm to base two of its argument, rounded down to
+ the nearest power of two - this is the same except for the border
+ case where only one bit is set. To adjust for this border case,
+ we subtract one from the argument to the MSB function). Calculate
+ the zone number by subtracting page shift. */
+ zone_nr = l4_msb (size - 1) - PAGESIZE_LOG2;
+
+ /* Find the smallest zone which fits the request and has memory
+ available.  Check the index before reading the zone: testing
+ zone[zone_nr] first read one element past the end of the array
+ when every zone from ZONE_NR up was exhausted. */
+ while (zone_nr < ZONES && !zone[zone_nr])
+ zone_nr++;
+
+ if (zone_nr == ZONES)
+ {
+ debug (1, "Cannot allocate a block of %d bytes!", size);
+ return 0;
+ }
+
+ /* Found a zone. Now bite off the beginning of the first block in
+ this zone. */
+ block = zone[zone_nr];
+
+ zone[zone_nr] = block->next;
+ if (zone[zone_nr])
+ zone[zone_nr]->prev = 0;
+
+ /* And donate back the remainder of this block, if any. */
+ if (ZONE_SIZE (zone_nr) > size)
+ zfree (((l4_word_t) block) + size, ZONE_SIZE (zone_nr) - size);
+
+ /* Zero out the newly allocated block. */
+ memset (block, 0, size);
+
+ return (l4_word_t) block;
+}
+
+
+/* Dump the internal data structures: print the free list of every
+ zone (starting at the first non-empty one, counting down) followed
+ by the total amount of memory available, in kbytes, in decimal and
+ hex. */
+#ifndef NDEBUG
+void
+zalloc_dump_zones (const char *prefix)
+{
+ int i;
+ struct block *block;
+ l4_word_t available = 0;
+ int print_empty = 0;
+
+ /* The loop terminates when I reaches -1: ZONE_SIZE (-1) is half a
+ page, which fails the >= PAGESIZE test.  NOTE(review): for the
+ top zone indices ZONE_SIZE shifts into the int sign bit, which
+ is undefined behavior in C -- in practice it yields a negative
+ value so those zones are silently skipped; confirm this is
+ acceptable for a debug dump. */
+ for (i = ZONES - 1; ZONE_SIZE (i) >= PAGESIZE; i--)
+ if (zone[i] || print_empty)
+ {
+ /* Once one zone has been printed, also print the empty
+ zones below it so the dump reads as a full table. */
+ print_empty = 1;
+ printf ("%s: 0x%x: { ", prefix, ZONE_SIZE (i));
+ for (block = zone[i]; block; block = block->next)
+ {
+ available += ZONE_SIZE (i);
+ printf ("%p%s", block, (block->next ? ", " : " "));
+ }
+ printf ("}\n");
+ }
+
+ /* The same kbyte count is printed twice: once decimal, once hex. */
+ printf ("%s: %llu (0x%llx) kbytes available\n", prefix,
+ (unsigned long long) available / 1024,
+ (unsigned long long) available / 1024);
+}
+#endif
diff --git a/viengoos/zalloc.h b/viengoos/zalloc.h
new file mode 100644
index 0000000..caea4a4
--- /dev/null
+++ b/viengoos/zalloc.h
@@ -0,0 +1,40 @@
+/* Zone allocator for physical memory server.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Written by Neal H Walfield.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef ZALLOC_H
+#define ZALLOC_H
+
+#include <l4.h>
+
+/* Add to the pool the block BLOCK of size SIZE. BLOCK must be
+ aligned to the system's minimum page size. SIZE must be a multiple
+ of the system's minimum page size. */
+void zfree (l4_word_t block, l4_word_t size);
+
+/* Allocate a zero-filled block of memory of size SIZE and return its
+ address, or 0 if no block of the required size could be allocated.
+ SIZE must be a multiple of the system's minimum page size. */
+l4_word_t zalloc (l4_word_t size);
+
+/* Dump some internal data structures. Only defined if zalloc was
+ compiled without NDEBUG defined. */
+void zalloc_dump_zones (const char *prefix);
+
+#endif /* ZALLOC_H */