author     neal <neal>  2008-08-22 12:09:22 +0000
committer  neal <neal>  2008-08-22 12:09:22 +0000
commit     dc9064e55f993d79c6ed1ca7093812eaa2fececd (patch)
tree       14e7610a3a6c89a7d4943fe8b44fd50a3d908c84
parent     46828ec1c04a56f0d83eaf36d8161316c24b07f3 (diff)
2008-08-22 Neal H. Walfield <neal@gnu.org>
	* libhurd-cap: Remove.
	* libhurd-cap-server: Likewise.
	* physmem: Likewise.
	* task: Likewise.
-rw-r--r--  ChangeLog | 6
-rw-r--r--  libhurd-cap-server/ChangeLog | 499
-rw-r--r--  libhurd-cap-server/Makefile.am | 41
-rw-r--r--  libhurd-cap-server/README | 185
-rw-r--r--  libhurd-cap-server/bucket-create.c | 176
-rw-r--r--  libhurd-cap-server/bucket-free.c | 47
-rw-r--r--  libhurd-cap-server/bucket-inhibit.c | 142
-rw-r--r--  libhurd-cap-server/bucket-inject.c | 58
-rw-r--r--  libhurd-cap-server/bucket-manage-mt.c | 1116
-rw-r--r--  libhurd-cap-server/bucket-worker-alloc.c | 51
-rw-r--r--  libhurd-cap-server/cap-server-intern.h | 599
-rw-r--r--  libhurd-cap-server/cap-server.h | 573
-rw-r--r--  libhurd-cap-server/class-alloc.c | 64
-rw-r--r--  libhurd-cap-server/class-create.c | 75
-rw-r--r--  libhurd-cap-server/class-destroy.c | 58
-rw-r--r--  libhurd-cap-server/class-free.c | 48
-rw-r--r--  libhurd-cap-server/class-inhibit.c | 114
-rw-r--r--  libhurd-cap-server/class-init.c | 180
-rw-r--r--  libhurd-cap-server/client-create.c | 213
-rw-r--r--  libhurd-cap-server/client-inhibit.c | 157
-rw-r--r--  libhurd-cap-server/client-release.c | 192
-rw-r--r--  libhurd-cap-server/ctx-cap-use.c | 278
-rw-r--r--  libhurd-cap-server/headers.m4 | 13
-rw-r--r--  libhurd-cap-server/obj-copy-out.c | 121
-rw-r--r--  libhurd-cap-server/obj-dealloc.c | 50
-rw-r--r--  libhurd-cap-server/obj-drop.c | 36
-rw-r--r--  libhurd-cap-server/obj-entry-space.c | 52
-rw-r--r--  libhurd-cap-server/obj-inhibit.c | 131
-rw-r--r--  libhurd-cap-server/table.c | 96
-rw-r--r--  libhurd-cap-server/table.h | 207
-rw-r--r--  libhurd-cap-server/task-death.c | 77
-rw-r--r--  libhurd-cap-server/task-death.h | 123
-rw-r--r--  libhurd-cap/ChangeLog | 6
-rw-r--r--  libhurd-cap/Makefile.am | 28
-rw-r--r--  libhurd-cap/cap-intern.h | 24
-rw-r--r--  libhurd-cap/cap-move.c | 259
-rw-r--r--  libhurd-cap/cap-user.c | 210
-rw-r--r--  libhurd-cap/cap.c | 236
-rw-r--r--  libhurd-cap/cap.h | 132
-rw-r--r--  libhurd-cap/headers.m4 | 13
-rw-r--r--  physmem/ChangeLog | 403
-rw-r--r--  physmem/Makefile.am | 47
-rw-r--r--  physmem/README | 89
-rw-r--r--  physmem/config.m4 | 21
-rw-r--r--  physmem/container.c | 718
-rw-r--r--  physmem/frame-entry.c | 1232
-rw-r--r--  physmem/frame.c | 186
-rw-r--r--  physmem/headers.m4 | 13
-rw-r--r--  physmem/ia32-cmain.c | 112
-rw-r--r--  physmem/ia32-crt0.S | 44
-rw-r--r--  physmem/malloc-wrap.c | 54
-rw-r--r--  physmem/malloc.c | 5567
-rw-r--r--  physmem/mmap.c | 53
-rw-r--r--  physmem/output.c | 227
-rw-r--r--  physmem/output.h | 60
-rw-r--r--  physmem/physmem.c | 283
-rw-r--r--  physmem/physmem.h | 76
-rw-r--r--  physmem/priv.h | 363
-rw-r--r--  physmem/zalloc.c | 287
-rw-r--r--  physmem/zalloc.h | 40
-rw-r--r--  task/ChangeLog | 142
-rw-r--r--  task/Makefile.am | 47
-rw-r--r--  task/ia32-cmain.c | 151
-rw-r--r--  task/ia32-crt0.S | 56
-rw-r--r--  task/malloc-wrap.c | 54
-rw-r--r--  task/malloc.c | 5567
-rw-r--r--  task/mmap.c | 73
-rw-r--r--  task/output.c | 228
-rw-r--r--  task/output.h | 62
-rw-r--r--  task/task-class.c | 187
-rw-r--r--  task/task-id.c | 121
-rw-r--r--  task/task.c | 281
-rw-r--r--  task/task.h | 147
-rw-r--r--  task/thread.c | 155
74 files changed, 5 insertions, 23827 deletions
diff --git a/ChangeLog b/ChangeLog
index b34562a..d464d7d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,6 +1,10 @@
2008-08-22 Neal H. Walfield <neal@gnu.org>
- * wortel: Remove.
+ * libhurd-cap: Remove.
+ * libhurd-cap-server: Likewise.
+ * physmem: Likewise.
+ * task: Likewise.
+ * wortel: Likewise.
2008-06-27 Neal H. Walfield <neal@gnu.org>
diff --git a/libhurd-cap-server/ChangeLog b/libhurd-cap-server/ChangeLog
deleted file mode 100644
index db21a71..0000000
--- a/libhurd-cap-server/ChangeLog
+++ /dev/null
@@ -1,499 +0,0 @@
-2005-03-08 Neal H. Walfield <neal@gnu.org>
-
- * Makefile.am (libhurd_cap_server_a_SOURCES): Add ctx-cap-use.c.
- * cap-server.h (struct hurd_cap_rpc_context): Add member HANDLE.
- (struct hurd_cap_ctx_cap_use): New forward.
- (hurd_cap_ctx_size): New declaration.
- (hurd_cap_ctx_start_cap_use): New declaration.
- (hurd_cap_ctx_end_cap_use): New declaration.
- * cap-server-intern.h (struct hurd_cap_ctx_cap_use): New
- structure.
- * bucket-manage-mt.c (manage_demuxer): Save capability handle in
- CTX->handle.
- * ctx-cap-use.c: New file.
-
-2005-01-12 Neal H. Walfield <neal@gnu.org>
-
- * class-init.c (hurd_cap_class_init_untyped): Fix assert.
- * bucket-manage-mt.c (manage_mt_worker): Likewise.
-
-2005-01-10 Bas Wijnen <shevek@fmf.nl>
-
- * README (TODO): Fix spelling errors.
-
-2005-01-07 Neal H. Walfield <neal@gnu.org>
-
- * class-init.c (hurd_cap_class_init_untyped): Supply the
- allocate_buffer and deallocate_buffer arguments to
- hurd_slab_create to conform with the new semantics.
- * obj-entry-space.c (_hurd_cap_obj_entry_space): Likewise for
- HURD_SLAB_SPACE_INITIALIZER.
- * client-create.c (_hurd_cap_client_space): Likewise.
-
-2004-12-01 Neal H. Walfield <neal@gnu.org>
-
- * cap-server-intern.h (struct _hurd_cap_client): Fold the struct
- _hurd_cap_client_entry into the struct _hurd_cap_client.
- * bucket-create.c (_hurd_cap_client_death): Change all uses of
- _hurd_cap_client_entry_t to use the merged _hurd_cap_client_t.
- (hurd_cap_bucket_create): Likewise.
- * bucket-manage-mt.c (lookup_client): Likewise.
- * client-create.c (_hurd_cap_client_create): Likewise.
- * client-release.c (_hurd_cap_client_release): Likewise.
-
-2004-12-01 Neal H. Walfield <neal@gnu.org>
-
- * cap-server-intern.h (_HURD_CAP_CLIENT_ID_BITS): Move from
- ../hurd/types.h. Prepend underscore and update names in
- definition as needed.
- (_HURD_CAP_ID_BITS): Likewise.
- (_HURD_CAP_CLIENT_ID_MASK): Likewise.
- (_HURD_CAP_ID_MASK): Likewise.
- (_hurd_cap_id_t): Likewise.
- (_hurd_cap_client_id_t): Likewise.
- (_hurd_cap_client_id): Likewise.
- (_hurd_cap_id): Likewise.
- (_hurd_cap_handle_make): Likewise.
-
- * bucket-inject.c (hurd_cap_bucket_inject): Update CAP_ID's type.
- * bucket-manage-mt.c (lookup_client): Likewise for CLIENT_ID.
- * cap-server-intern.h (struct _hurd_cap_obj_entry): Likewise for
- ID.
- (_hurd_cap_obj_copy_out): Likewise for CLIENT.
- (struct _hurd_cap_client): Likewise for ID.
- (_hurd_cap_client_release): Likewise for IDX.
- * client-release.c (_hurd_cap_client_release): Likewise for IDX.
- * obj-copy-out.c (_hurd_cap_obj_copy_out): Likewise for CLIENT.
-
-2004-12-01 Marcus Brinkmann <marcus@gnu.org>
-
- * client-create.c (_hurd_cap_client_constructor): Do not
- initialize CLIENT->caps table here.
- (_hurd_cap_client_alloc): Do not initialize CLIENT->lock,
- CLIENT->state or CLIENT->pending_rpcs here.
- Reported by Neal Walfield <neal@gnu.org>.
-
- * bucket-manage-mt.c (manage_demuxer): Rework the logic that tries
- to wait until the client becomes inhibited.
- Reported by Neal Walfield <neal@gnu.org>.
-
-2004-11-30 Neal H. Walfield <neal@gnu.org>
-
- * cap-server.h: Improve comments.
- (hurd_cap_class_create_untyped): Tighten alignment restrictions:
- ALIGNMENT must be a power of 2.
- * class-init.c (hurd_cap_class_init_untyped): Calculate best
- alignment.
-
- * cap-server.h (hurd_cap_obj_get_size): Renamed to ...
- (hurd_cap_obj_user_offset): ... this.
- (hurd_cap_obj_to_user_untyped): Call hurd_cap_obj_user_offset, not
- hurd_cap_obj_get_size.
- (hurd_cap_obj_from_user_untyped): Likewise.
- * class-init.c (hurd_cap_class_init_untyped): Likewise.
-
- * class-create.c: Improve comments.
- * obj-copy-out.c: Likewise.
-
-2004-11-30 Neal H. Walfield <neal@gnu.org>
-
- * cap-server-intern.h (_hurd_cap_obj_drop): Reverse faulty logic.
-
-2004-11-30 Neal H. Walfield <neal@gnu.org>
-
- * bucket-manage-mt.c (manage_mt_get_next_worker): If
- pthread_create_from_l4_tid_np fails, return WORKER to pthread's
- available thread pool.
- (worker_alloc_async): Likewise.
- (hurd_cap_bucket_manage_mt): Likewise.
-
-2004-11-30 Neal H. Walfield <neal@gnu.org>
-
- * bucket-manage-mt.c: Include <compiler.h>.
- (manage_mt_worker): Use EXPECT_TRUE and EXPECT_FALSE rather than
- __builtin_expect. Comment fixes.
- (manage_mt_get_next_worker): Likewise.
- (hurd_cap_bucket_manage_mt): Likewise.
- * cap-server-intern.h: Include <compiler.h>.
- (_hurd_cap_obj_drop): Use EXPECT_FALSE rather than
- __builtin_expect.
- (struct _hurd_cap_bucket): Comment fix.
- * client-release.c: Include <compiler.h>.
- (_hurd_cap_client_release): Use EXPECT_TRUE rather than
- __builtin_expect.
-
-2004-11-30 Neal H. Walfield <neal@gnu.org>
-
- * bucket-create.c (_hurd_cap_client_death): Comparisons use ==,
- not =. Fix it.
-
-2004-11-29 Neal H. Walfield <neal@gnu.org>
-
- * client-create.c (_hurd_cap_client_create): Deallocate the
- gratuitous _hurd_cap_client_t, not the one we want to use.
-
-2004-11-01 Marcus Brinkmann <marcus@gnu.org>
-
- * cap-server.h (hurd_cap_class_create): Rename to ...
- (hurd_cap_class_create_untyped): ... this.
- (hurd_cap_class_create): New macro.
- (hurd_cap_class_init): Rename to ...
- (hurd_cap_class_init_untyped): ... this.
- (hurd_cap_class_init): New macro.
- (hurd_cap_get_obj_size): New inline function.
- (hurd_cap_obj_to_user_untyped, hurd_cap_obj_from_user_untyped):
- New inline function.
- (hurd_cap_obj_to_user, hurd_cap_obj_from_user): New macro.
- * class-alloc.c (hurd_cap_class_alloc): New variable NEW_OBJ, use
- it as a temporary placeholder.
- * class-create.c (hurd_cap_class_create): Rename to ...
- (hurd_cap_class_create_untyped): ... this.
- Use hurd_cap_class_init_untyped.
- * class-init.c (hurd_cap_class_init): Rename to ...
- (hurd_cap_class_init_untyped): ... this.
- Add the size of struct hurd_cap_obj to SIZE.
- * client-create.c (_hurd_cap_client_alloc): New variable
- NEW_CLIENT, use it as a temporary placeholder.
- * obj-copy-out.c (_hurd_cap_obj_copy_out): New variable NEW_ENTRY,
- use it as a temporary placeholder.
-
- * bucket-inhibit.c (hurd_cap_bucket_end): Return something.
-
- * cap-server-intern.h (_hurd_cap_bucket_cond_check): Fix
- assignment.
- * table.h (hurd_table_lookup): Remove unused variable ERR.
- * table.c (hurd_table_enter): Likewise.
- * bucket-manage-mt.c (manage_demuxer): Remove unused variable
- FROM.
- (manage_mt_worker): Remove unused variable DEMUXED.
- (worker_alloc_async): Remove unused variable WORKER_ITEM. Fix
- assignments.
- * client-release.c (_hurd_cap_client_dealloc): Remove unused
- variables DONE, CURRENT_IDX.
- (_hurd_cap_client_release): Remove unused variable FOUND.
-
-2004-10-29 Marcus Brinkmann <marcus@gnu.org>
-
- * cap-server.h: Include <atomic.h>.
- (struct hurd_cap_obj): Change type of member refs
- from unsigned int to uatomic32_t.
- (hurd_cap_obj_ref): Use atomic_increment().
- (hurd_cap_obj_rele): Use atomic_decrement().
- * cap-server-intern.h (_hurd_cap_obj_drop): Remove unused variable
- cap_class. Use atomic_decrement_and_test.
- * obj-dealloc.c (_hurd_cap_obj_dealloc): Assert that OBJ->refs is
- 0, not 1. Do not unlock the object.
-
-2004-10-25 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (libhurd_cap_server_a_SOURCES): Add
- bucket-worker-alloc.c.
- * bucket-worker-alloc.c: New file.
- * bucket-create.c (hurd_cap_bucket_create): Initialize
- BUCKET->worker_alloc_sync and BUCKET->worker_alloc_state.
- * cap-server-intern.h (struct _hurd_cap_bucket): New members
- worker_alloc_async and worker_alloc.
- * cap-server.h (hurd_cap_bucket_worker_alloc): New prototype.
- * bucket-manage-mt.c (hurd_cap_bucket_manage_mt): Move
- initialization of acceptor and xfer timeouts to just before the
- IPC (repeat it for every IPC). Set xfer timeout to all zero. Do
- not pay attention to BUCKET->free_worker after cancelling the
- current worker thread at shutdown. Create a worker allocation
- thread if requested, and shut it down before terminating.
- (manage_mt_worker): Take a new argument, ASYNC. If it is set, add
- worker thread to free list at startup, and use a timeout for the
- first wait.
- (manage_mt_worker_async, manage_mt_worker_sync): New wrapper
- functions.
- (worker_alloc_async): New function.
- (manage_mt_get_next_worker):
-
-2004-10-23 Marcus Brinkmann <marcus@gnu.org>
-
- * bucket-manage-mt.c: Include <l4.h>.
- (l4_xreceive, l4_xreceive_timeout): New macros.
- (manage_mt_worker): Use l4_xreceive and l4_xreceive_timeout
- instead of l4_wait and l4_wait_timeout.
-
-2004-10-19 Marcus Brinkmann <marcus@gnu.org>
-
- * client-inhibit.c (_hurd_cap_client_resume): Lock CLIENT->lock,
- not BUCKET->lock.
- * obj-inhibit.c (hurd_cap_obj_resume): Lock OBJ->lock, not
- CAP_CLASS->lock.
-
-2004-10-07 Marcus Brinkmann <marcus@gnu.org>
-
- * bucket-manage-mt.c (reply_err): Don't set propagation flag.
- (manage_mt_worker): Clear the propagation flag.
-
-2004-08-02 Peter Bruin <pjbruin@dds.nl>
-
- * cap-server-intern.h (_hurd_cap_bucket_cond_check): Call
- _hurd_cap_bucket_cond_busy instead of taking its address.
-
-2004-04-12 Marcus Brinkmann <marcus@gnu.org>
-
- * bucket-manage-mt.c (manage_mt_worker): Set CTX.obj to NULL
- before calling demuxer. If later on CTX.obj is not NULL, call
- manage_demuxer_cleanup after sending the reply message. Set
- propagation flag in message tag.
- (manage_demuxer): Do not release OBJ lock before calling the
- demuxer, do not acquire and release OBJ lock afterwards. Instead,
- acquire a reference to OBJ.
- (reply_err): Set propagation flag in message tag.
-
-2004-04-11 Marco Gerards <metgerards@student.han.nl>
-
- * table.c (hurd_table_init): Return an error code.
-
-2004-04-10 Marcus Brinkmann <marcus@gnu.org>
-
- * bucket-manage-mt.c (manage_mt_worker): Use
- _hurd_cap_list_item_dequeued to set current_worker_is_us.
-
-2004-04-09 Marcus Brinkmann <marcus@gnu.org>
-
- * cap-server.h (hurd_cap_bucket_inject): Change hurd_cap_t to
- hurd_cap_handle_t in R_CAP argument type.
- * bucket-manage-mt.c (manage_demuxer): Likewise.
- * bucket-inject.c (hurd_cap_bucket_inject): Likewise. Use
- hurd_cap_handle_make instead of hurd_cap_make.
-
- * obj-copy-out.c (_hurd_cap_obj_copy_out): Set *R_ID after adding
- element.
- * bucket-inject.c (hurd_cap_bucket_inject): Set *R_CAP and return 0.
-
-2004-04-08 Marcus Brinkmann <marcus@gnu.org>
-
- * bucket-inject.c, obj-copy-out.c: New files.
- * Makefile.am (libhurd_cap_server_a_SOURCES): Add bucket-inject.c
- and obj-copy-out.c
- * bucket-create.c (hurd_cap_bucket_create): Initialize members
- is_managed, nr_caps, waiting_rpcs of BUCKET. Set R_BUCKET.
- * cap-server-intern.h (_hurd_cap_client_dealloc): Add new argument
- BUCKET to prototype.
- (struct hurd_cap_client): Remove declaration.
- (struct _hurd_cap_list_item): Add new member tid. Change type for
- member client to _hurd_cap_client_t.
- (_hurd_cap_list_item_add, _hurd_cap_list_item_remove,
- _hurd_cap_list_item_dequeued): New inline functions.
- (struct _hurd_cap_obj_entry): Rename member IDX to ID.
- (_hurd_cap_obj_copy_out): New prototype.
- (_hurd_cap_client_create): Remove argument R_IDX from prototype.
- (struct _hurd_cap_bucket): Add new members MANAGER, IS_MANAGED,
- IS_MANAGER_WAITING, NR_CAPS, WAITING_RPCS, and FREE_WORKER.
- (_hurd_cap_client_t): Type definition moved to ...
- * cap-server.h (_hurd_cap_client_t): Here.
- (struct _hurd_cap_client_t): New declaration.
- (struct hurd_cap_rpc_context): Define it.
- (hurd_cap_class_demux_t): Renamed to ...
- (hurd_cap_class_demuxer_t): ... this.
- (hurd_cap_class_create, hurd_cap_class_init): Use new type for demuxer
- argument in prototype.
- (hurd_cap_bucket_inject): New prototype.
- * cap-server.h: Include <hurd/types.h>
- * class-create (hurd_cap_class_create): Use new type for demuxer
- argument. Set R_CLASS.
- * class-init (hurd_cap_class_init): Use new type for demuxer argument.
- * client-release.c (_hurd_cap_client_dealloc): Take new argument
- BUCKET. New local variable NR_CAPS. Keep track of number of
- capabilities removed. Update BUCKET->nr_caps before return.
- (_hurd_cap_client_release): Pass new argument BUCKET to
- _hurd_cap_client_release.
- * client-create.c (_hurd_cap_client_create): Remove argument
- R_IDX. Consequently, do not set R_IDX anymore. Set R_CLIENT.
- Pass new argument BUCKET to _hurd_cap_client_dealloc.
- * bucket-inhibit.c (hurd_cap_bucket_end): Check BUCKET->nr_caps if
- FORCE flag is not set. Cancel the manager thread if needed.
- (_hurd_cap_bucket_cond_busy): Move to ...
- * cap-server-intern.h (_hurd_cap_bucket_cond_busy): ... here.
- Add attribute always-inline.
- (_hurd_cap_bucket_cond_check): New inline function.
- * client-inhibit.c (_hurd_cap_client_cond_busy): Move to ...
- * cap-server-intern.h (_hurd_cap_client_cond_busy): ... here.
- Add attribute always-inline.
- (_hurd_cap_client_cond_check): New inline function.
- * class-inhibit.c (_hurd_cap_class_cond_busy): Move to ...
- * cap-server-intern.h (_hurd_cap_class_cond_busy): ... here.
- Add attribute always-inline.
- (_hurd_cap_class_cond_check): New inline function.
- * obj-inhibit.c (_hurd_cap_obj_cond_busy): Move to ...
- * cap-server-intern.h (_hurd_cap_obj_cond_busy): ... here.
- Add attribute always-inline.
- (_hurd_cap_obj_cond_check): New inline function.
-
-2004-04-01 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (libhurd_cap_server_a_SOURCES): Remove bucket-add.c,
- bucket-remove.c.
- * bucket-add.c, bucket-remove.c: Files removed.
- * cap-server.h (hurd_cap_bucket_add, hurd_cap_bucket_remove):
- Remove prototypes.
- * cap-server-intern.h (struct _hurd_cap_class_entry): Removed.
- (_hurd_cap_class_entry_t): Removed.
- (struct _hurd_cap_bucket): Remove member classes.
- * bucket-create.c (hurd_cap_bucket_create): Do not initialize
- deleted member classes.
- * bucket-free.c (hurd_cap_bucket_free): Do not destroy deleted
- member classes.
-
-2004-03-26 Marcus Brinkmann <marcus@gnu.org>
-
- * bucket-add.c, bucket-create.c, bucket-free.c, bucket-inhibit.c,
- bucket-manage-mt.c, bucket-remove.c, cap-server-intern.h,
- class-free.c, class-init.c, obj-dealloc.c, obj-drop.c,
- obj-entry-space.c: New files.
- * Makefile.am (includehurd_HEADERS): Move table.h and task-death.h
- to ...
- (libhurd_cap_server_a_SOURCES): ... here. Add
- cap-server-intern.h, class-init.c, class-free.c, obj-dealloc.c,
- obj-drop.c, obj-entry-space.c, bucket-create.c, bucket-free.c,
- bucket-add.c, bucket-remove.c, bucket-inhibit.c,
- bucket-manage-mt.c. Remove class-dealloc.c.
- * headers.m4: Remove table.h and task-death.h.
- * cap-server.h: Include <stdbool.h>, do not include <assert.h>,
- <limits.h>, <hurd/types.h>, <hurd/slab.h>, <hurd/table.h>,
- <hurd/ihash.h> and <hurd/task-death.h>.
- (hurd_cond_wait, struct _hurd_cap_list_item,
- _hurd_cap_list_item_t, _hurd_cap_list_item_add,
- _hurd_cap_list_item_remove, struct _hurd_cap_entry,
- _hurd_cap_entry_t, struct hurd_cap_client, hurd_cap_client_t,
- struct _hurd_cap_client_entry, _hurd_cap_client_entry_t,
- _hurd_cap_class_dealloc, _hurd_cap_client_create,
- hurd_cap_client_lookup, _hurd_cap_client_dealloc,
- hurd_cap_client_inhibit, hurd_cap_client_resume,
- _hurd_cap_client_end): Removed.
- (hurd_cap_rpc_context): New declaration.
- (hurd_cap_class_demux_t): New dummy type (for now).
- (struct hurd_cap_class): Remove members OBJ_SIZE, CLIENT_SLAB,
- CLIENT_COND, CLIENT_COND_LOCK, CLIENTS, CLIENTS_REVERSE,
- CLIENT_THREADS and CLIENT_DEATH_NOTIFY. Change type of OBJ_SLAB
- to struct hurd_slab_space and rename it to OBJ_SPACE. Add member
- DEMUXER. Change type of member STATE to _hurd_cap_state_t.
- (struct hurd_cap_obj): Add member CAP_CLASS. Change type of
- member STATE to _hurd_cap_state_t.
- (hurd_cap_class_create): Add arguments alignment and demuxer to
- prototype.
- (hurd_cap_class_init): Likewise.
- (hurd_cap_class_free): New prototype.
- (_hurd_cap_class_destroy): Rename to ...
- (hurd_cap_class_destroy): ... this.
- (hurd_cap_obj_lock, hurd_cap_obj_unlock, hurd_cap_obj_ref,
- hurd_cap_obj_rele, hurd_cap_obj_drop): Remove CAP_CLASS argument.
- (hurd_cap_obj_drop): Only declare prototype here.
- (struct _hurd_cap_bucket): New forward declaration.
- (hurd_cap_bucket_t): New typedef.
- (hurd_cap_bucket_create, hurd_cap_bucket_free,
- hurd_cap_bucket_add, hurd_cap_bucket_remove,
- hurd_cap_bucket_manage_mt, hurd_cap_bucket_inhibit,
- hurd_cap_bucket_resume, hurd_cap_bucket_end): New prototypes.
- * class-alloc.c, class-create.c, class-destroy.c, class-inhibit.c,
- client-create.c, client-inhibit.c, client-release.c,
- obj-inhibit.c: Change the callers to take into account the above
- changes.
-
-2004-03-25 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (libhurd_cap_server_a_SOURCES): Add class-inhibit.c,
- client-create.c, client-release.c, client-inhibit.c, and
- obj-inhibit.c.
- * README, class-inhibit.c, client-create.c, client-release.c,
- client-inhibit.c, obj-inhibit.c: New files.
- * cap-server.h: Protect inclusion with _HURD_CAP_SERVER_H.
- Include <hurd/types.h>, <hurd/table.h>, <hurd/ihash.h> and
- <hurd/task-death.h>.
- (hurd_cond_wait): Define to pthread_cond_wait for now.
- (struct _hurd_cap_list_item): New struct.
- (_hurd_cap_list_item_t): New typedef.
- (_hurd_cap_list_item_add, _hurd_cap_list_item_remove): New inline
- functions.
- (enum _hurd_cap_state): New enum.
- (struct _hurd_cap_obj): Add members STATE, PENDING_RPCS,
- COND_WAITER, and CLIENTS.
- (hurd_cap_obj_data): Removed.
- (struct _hurd_cap_entry): New struct.
- (_hurd_cap_entry_t): New type.
- (struct hurd_cap_client): New struct.
- (hurd_cap_client_t): New type.
- (struct _hurd_cap_client_entry): New struct.
- (_hurd_cap_client_entry_t): New type.
- (struct hurd_cap_class): Rename member SLAB to OBJ_SLAB. Add new
- members OBJ_COND, OBJ_COND_LOCK, CLIENT_SLAB, CLIENT_COND,
- CLIENT_COND_LOCK, CLIENT_DEATH_NOTIFY, LOCK, CLIENTS,
- CLIENTS_REVERSE, STATE, COND, COND_WAITER and PENDING_RPCS.
- (hurd_cap_class_init, _hurd_cap_class_destroy): New prototypes.
- (hurd_cap_class_dealloc): Rename prototype to ...
- (_hurd_cap_class_dealloc): ... this.
- (hurd_cap_obj_lock, hurd_cap_obj_unlock): Change return type to
- void.
- (hurd_cap_obj_drop): Call _hurd_cap_class_dealloc (not
- hurd_cap_class_dealloc).
- (_hurd_cap_client_create, _hurd_cap_client_dealloc,
- _hurd_cap_client_release): New prototype.
- (hurd_cap_client_lookup): New inline function.
- (hurd_cap_class_inhibit, hurd_cap_class_resume,
- hurd_cap_client_inhibit, hurd_cap_client_resume,
- _hurd_cap_client_end, hurd_cap_obj_inhibit, hurd_cap_obj_resume):
- New prototypes.
- * class-alloc.c (hurd_cap_class_alloc): Use CAP_CLASS->obj_slab,
- not CAP_CLASS->slab. Don't check return value of
- hurd_cap_obj_lock (as it is void now).
- * class-create.c (_hurd_cap_obj_constructor): Initialize
- OBJ->clients, OBJ->state, OBJ->pending_rpcs and OBJ->clients.
- Don't check return value of pthread_mutex_destroy.
- (_hurd_cap_obj_destructor): Destroy OBJ->clients. Don't check
- return value of pthread_mutex_destroy.
- (_hurd_cap_client_constructor, _hurd_cap_client_destructor,
- _hurd_cap_client_death, hurd_cap_class_init): New functions.
- (hurd_cap_class_create): Rewritten in terms of
- hurd_cap_class_init.
- * class-dealloc.c (hurd_cap_class_dealloc): Rename to ...
- (_hurd_cap_class_dealloc): ... this. Do not check return value of
- now void hurd_cap_obj_unlock function. Assert OBJ->state and
- OBJ->pending_rpcs. Use CAP_CLASS->obj_slab, not CAP_CLASS->slab.
- * class-destroy.c: Do not include <assert.h>.
- (_hurd_cap_client_try_destroy): New function.
- (hurd_cap_class_destroy): Rewritten.
- (hurd_cap_class_free): New function.
-
- * task-death.h (task_death_notify_t): Fix return type (should be
- void, not void *).
-
-2004-03-23 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (includehurd_HEADERS): Add table.h and task-death.h.
- (libhurd_cap_server_a_SOURCES): Add task-death.c, remove table.h.
- * task-death.h, task-death.c: New files.
- * headers.m4: Add task-death.h.
-
- * table.h (_HURD_TABLE_ALIGN): New macro.
- (HURD_TABLE_INITIALIZER): Use it for ENTRY_SIZE.
-
- * Makefile.am (AM_CFLAGS): New variable.
- * headers.m4: Add table.h.
- * table.h (hurd_table_entry_t): Type removed.
- (struct hurd_table): Change type of DATA to char *. New member
- ENTRY_SIZE.
- (HURD_TABLE_INITIALIZER): Take argument SIZE_OF_ONE. Initialize
- ENTRY_SIZE.
- (hurd_table_init): Add new argument ENTRY_SIZE to prototype.
- (hurd_table_enter): Change type of DATA to void *.
- (hurd_table_lookup): Change return type to void *. Return 0, not
- HURD_TABLE_EMPTY. Reimplement for new semantics.
- * table.c: Include <assert.h>.
- (hurd_table_init): Add new argument ENTRY_SIZE. Pass it to
- HURD_TABLE_INITIALIZER.
- (hurd_table_enter): Use TABLE->entry_size for reallocation.
- Adjust code to fit new semantics. Add 1 to TABLE->first_free to
- make that point to the element after the one we just added.
-
- * Makefile.am (libhurd_cap_server_a_SOURCES): Add table.h, table.c.
- * table.h, table.c: New files.
-
-2004-03-21 Marcus Brinkmann <marcus@gnu.org>
-
- * cap-server.h (hurd_cap_obj_rele): Require at least two
- references.
-
- * Initial release.
diff --git a/libhurd-cap-server/Makefile.am b/libhurd-cap-server/Makefile.am
deleted file mode 100644
index 1d0c400..0000000
--- a/libhurd-cap-server/Makefile.am
+++ /dev/null
@@ -1,41 +0,0 @@
-# Makefile.am - Makefile template for libhurd-cap-server.
-# Copyright (C) 2004, 2005 Free Software Foundation, Inc.
-# Written by Marcus Brinkmann.
-#
-# This file is part of the GNU Hurd.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free
-# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-# 02111-1307 USA.
-
-lib_LIBRARIES = libhurd-cap-server.a
-
-includehurddir = $(includedir)/hurd
-includehurd_HEADERS = cap-server.h
-
-# FIXME: Build a special libhurd-ihash.a using libc-parts for the rootservers,
-# and a normal for everybody else.
-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/libc-parts
-AM_CFLAGS = -std=gnu99
-libhurd_cap_server_a_SOURCES = table.h table.c \
- task-death.h task-death.c \
- cap-server-intern.h class-init.c class-create.c \
- class-destroy.c class-free.c \
- class-alloc.c class-inhibit.c \
- obj-dealloc.c obj-drop.c obj-inhibit.c \
- obj-entry-space.c obj-copy-out.c \
- client-create.c client-release.c client-inhibit.c \
- bucket-create.c bucket-free.c bucket-inhibit.c \
- bucket-manage-mt.c bucket-worker-alloc.c bucket-inject.c \
- ctx-cap-use.c
diff --git a/libhurd-cap-server/README b/libhurd-cap-server/README
deleted file mode 100644
index ce19f55..0000000
--- a/libhurd-cap-server/README
+++ /dev/null
@@ -1,185 +0,0 @@
-TODO:
-
-* Keep track of number of allocated capabilities or otherwise enable
- to check if a bucket is still in use!!! (so at least a per-client
- count is required).
-* Implement bucket-inhibit correctly. !!! Also see:
-* Decide if it is needed to allow capability allocation
- outside of RPCs (as return arguments). In that case, allocation
- must be blocked while it is checked if no extinct users exists -
- otherwise inhibiting RPCs is good enough.
-* Verify all the bucket/client stuff after rewrite.
-
-* Do we need to support soft references?
-* Extend table and hash interface to reclaim resources without
- reinitialization. !!
-* table.h/table.c should take alignment.
-
-An introduction to the capability system, server side.
-------------------------------------------------------
-
-A server provides services to its clients. In a capability system the
-right to access a given service is called a capability. This right is
-held by a client. It was previously granted to the client by the
-server. The client can then make use of this right, cease it, or
-transfer (copy) it to a new client.
-
-The capability system is object oriented. This means that the
-services granted by the server are represented by remote procedure
-calls (RPCs) invoked on server-side objects. A capability thus
-represents the right to invoke an (arbitrary) RPC on any given object.
-
-The Hurd capability system provides a flexible, yet efficient, way for
-servers to implement services on different types of objects for many
-users.
-
-The server-side objects (capability objects) are organized in
-capability classes. All capability objects in one class have the same
-basic type. Of course, the implementation of the server is free to
-make further distinctions among the objects in one class. But the
-implementation of the capability library makes certain assumptions
-about classes, and some operations always affect all objects in one
-class. Furthermore, all objects in one class share certain
-resources. Thus, a capability class is an important organizational
-structure.
-
-For example, all open files in a file server can be implemented as one
-capability class. Another capability class could be used for all
-filesystem control capabilities.
-
-
-Capability Objects
-------------------
-
-Any server-side object you want to receive RPCs on is represented by a
-capability object. Clients are then granted access to capability
-objects, which means that they are allowed to invoke RPCs on such
-objects (see "Capabilities").
-
-All capability objects have the same basic storage size, use the same
-constructors and destructors, and are cached in the same slab
-allocator. Capability objects have their own lock and reference
-counter. The lock and reference counter are embedded at the start of
-your own definition of what a capability object should contain:
-
- struct my_cap
- {
- struct hurd_cap_obj obj;
-
- int my_own_data;
- ...
- };
-
-
-Capability objects are cached objects. They are cached in the slab
-allocator provided by libhurd-slab. This improves processor cache
-usage and allows pre-allocation, and makes it possible to keep some
-state even across object life time. The life-time of a capability
-object, and the meaning of the various constructors and destructors,
-can be seen in the following diagram:
-
- 1. Object is constructed in the cache OBJ_INIT
- 2.1. Object is instantiated and removed from the free list OBJ_ALLOC
- 2.2. Object is deallocated and put back on the free list OBJ_REINIT
- 3. Object is destroyed and removed from the cache OBJ_DESTROY
-
- Note that step 2 can occur several times, or not at all.
- This is the state diagram for each object:
-
- START ==(1.)== OBJ_INIT --(3.)--> OBJ_DESTROY = END
- | ^
- | |
- (2.1.) (3.)
- | |
- v |
- OBJ_ALLOC -(2.2.)--> OBJ_REINIT
- ^ |
- | |
- +-------(2.1.)-------+
-
-Capability objects are constructed and initialized in bursts whenever
-a new slab page is allocated for the cache. For this purpose, the
-OBJ_INIT callback is invoked. If the object is used, further
-per-instantiation initialization can be performed by OBJ_ALLOC. Each
-time this happens, the OBJ_REINIT callback is invoked when the object
-becomes deallocated and is returned to the cache. At the end of each
-object's lifetime, when the cache page is destroyed, for example due to
-memory pressure, or because the capability class is destroyed, the
-destructor callback OBJ_DESTROY is called.
-
-OBJ_ALLOC is provided because some resources may not be suitable for
-caching. In particular, OBJ_REINIT must not fail, so any resources
-that can not be safely (ie without errors) reverted to their
-initialized state are not suitable for caching and must be allocated
-with OBJ_ALLOC and destroyed with OBJ_REINIT.
-
-After having defined your own capability objects, you can create a
-capability class from these objects. A capability class is a complex
-structure that allows to grant access to capability objects to users,
-and process the incoming RPCs.
-
-
-Capability Classes
-------------------
-
-Capability classes require a capability object definition (via storage
-size and constructor/destructor callbacks), and a demuxer for
-incoming RPC messages on capability objects from this class.
-
-After creating a class, you will usually want to start a manager
-thread and call one of the RPC manage functions (multi-threaded or
-single-threaded). The manage function starts processing incoming RPCs
-immediately. You can provide a timeout or let it run indefinitely.
-If you specify a timeout, the manage function will exit if there are
-no active client connections for a certain time.
-
-You can also inhibit all RPCs on a class, from the outside or from an
-RPC handler. This will cancel all pending operations (except the
-calling one) and prevent any more messages from being processed.
-Incoming RPCs are blocked (FIXME: except interrupt_rpc).
-
-To prevent DoS attacks, only one RPC per client thread at any time is
-allowed.
-
-
-Clients
--------
-
-Each client gets a capability ID name space. In this name space,
-references for capability objects are kept. If a capability object
-gets revoked, the references become stale.
-
-
-Server Loop
------------
-
-FIXME: Should inhibit class block the server manager thread, or only
-worker threads? The former is lighter on resources; the latter allows
-interrupt messages to be processed to interrupt the operation blocking
-the class.
-
-Worker Thread Operation
------------------------
-
-0. Lock cap_class for most of the following. If necessary, first
- block until class state is green.
-1. Check cap_class->client_threads if that client is already in an RPC.
- If yes, drop the message.
-2. Is this a proper RPC? If not, it might be an initial handshake request.
- If it is an initial handshake request, lookup the task ID in
- cap_class->clients_reverse, and if it doesn't exist, add it and return.
- Otherwise proceed.
-3. Lookup the provided client id in the table cap_class->clients
- (range checked), adding a reference. If not found, drop the message.
-4. Add yourself to cap_class->pending_rpcs.
-5. Unlock the cap_class, and lock the client. If necessary, first
- block until client state is green.
-6. Lookup the capability id, adding an internal reference. If not
- found, drop the message.
-7. FIXME: Handle external references and reference container
- transactions here. Otherwise, proceed:
-8. Add ourselves to client->pending_rpcs. Unlock the client, lock the
- capability object. If necessary, first block until cap obj state is green,
-9. Add yourself to cap_obj->pending_rpcs.
-10. Process RPC.
-11. FIXME: Reverse the whole funky business.
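
For orientation, here is a rough server-side sketch of what the README
above describes: a user-defined capability object embedding struct
hurd_cap_obj, plus a class-specific demuxer of the kind the worker
threads in bucket-manage-mt.c below invoke.  The demuxer argument type
follows the struct hurd_cap_rpc_context usage seen in this diff; the
exact prototypes of hurd_cap_class_create, hurd_cap_bucket_create and
hurd_cap_bucket_manage_mt are not shown here, so the surrounding setup
is only indicated in comments, and the names my_cap and my_demuxer are
illustrative.

  /* Sketch under the assumptions stated above; not the verbatim API.  */
  #include <hurd/cap-server.h>

  /* The user object embeds struct hurd_cap_obj at the start.  */
  struct my_cap
  {
    struct hurd_cap_obj obj;
    int my_own_data;
  };

  /* Class-specific demuxer, run by a worker thread after the client,
     class and object have been looked up, referenced, and found to be
     in the green state.  CTX carries the message, the sender, the
     client and the capability object.  */
  static error_t
  my_demuxer (hurd_cap_rpc_context_t ctx)
  {
    /* Decode ctx->msg, operate on ctx->obj, store the reply in ctx->msg.  */
    return 0;
  }

A server would then create the class (passing the object size, the
OBJ_* callbacks and my_demuxer to hurd_cap_class_create), create a
bucket with hurd_cap_bucket_create, hand out initial handles with
hurd_cap_bucket_inject, and serve RPCs with hurd_cap_bucket_manage_mt.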
diff --git a/libhurd-cap-server/bucket-create.c b/libhurd-cap-server/bucket-create.c
deleted file mode 100644
index e79091c..0000000
--- a/libhurd-cap-server/bucket-create.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* bucket-create.c - Create a capability bucket.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <pthread.h>
-
-#include "cap-server-intern.h"
-
-
-/* Process the task death event for TASK_ID. */
-static void
-_hurd_cap_client_death (void *hook, hurd_task_id_t task_id)
-{
- hurd_cap_bucket_t bucket = (hurd_cap_bucket_t) hook;
- _hurd_cap_client_t client;
-
- pthread_mutex_lock (&bucket->lock);
- client = (_hurd_cap_client_t) hurd_ihash_find (&bucket->clients_reverse,
- task_id);
- if (client)
- {
- /* Found it. We will consume the "client is still living"
- reference, which can only be removed by us. As client death
- notifications are fully serialized, we don't need to take an
- extra reference now. However, we must mark the client entry
- as dead, so that no further references are acquired by
- anybody else. */
- client->dead = 1;
- }
- pthread_mutex_unlock (&bucket->lock);
-
- if (client)
- {
- error_t err;
-
- /* Inhibit all RPCs on this client. This can only fail if we
- are canceled. However, we are the task death manager thread,
- and nobody should cancel us. (FIXME: If it turns out that we
- can be canceled, we should just return on error). */
- err = _hurd_cap_client_inhibit (bucket, client);
- assert (!err);
-
- /* End RPCs on this client. There shouldn't be any (the client
- is dead), but due to small races, there is a slight chance we
- still have a worker thread blocked on an incoming message
- from the now dead client task. */
- _hurd_cap_client_end (bucket, client);
-
-#ifndef NDEBUG
- pthread_mutex_lock (&bucket->lock);
- /* Now, we should have the last reference for this client. */
- assert (client->refs == 1);
- pthread_mutex_unlock (&bucket->lock);
-#endif
-
- /* Release our, the last, reference and deallocate all
- resources, most importantly this will remove us from the
- client table of the class and release the task info
- capability. */
- _hurd_cap_client_release (bucket, client->id);
- }
-}
-
-
-/* Create a new bucket and return it in R_BUCKET. */
-error_t
-hurd_cap_bucket_create (hurd_cap_bucket_t *r_bucket)
-{
- error_t err;
- hurd_cap_bucket_t bucket;
-
- bucket = malloc (sizeof (struct _hurd_cap_bucket));
- if (!bucket)
- return errno;
-
-
- /* Client management. */
-
- err = pthread_cond_init (&bucket->client_cond, NULL);
- if (err)
- goto err_client_cond;
-
- err = pthread_mutex_init (&bucket->client_cond_lock, NULL);
- if (err)
- goto err_client_cond_lock;
-
- /* The client death notifications will be requested when we start to
- serve RPCs on the bucket. */
-
- /* Bucket management. */
-
- err = pthread_mutex_init (&bucket->lock, NULL);
- if (err)
- goto err_lock;
-
- bucket->is_managed = false;
- bucket->state = _HURD_CAP_STATE_GREEN;
-
- err = pthread_cond_init (&bucket->cond, NULL);
- if (err)
- goto err_cond;
-
- /* The member cond_waiter will be initialized when the state changes
- to _HURD_CAP_STATE_YELLOW. */
-
- bucket->nr_caps = 0;
- bucket->pending_rpcs = NULL;
- bucket->waiting_rpcs = NULL;
-
- hurd_ihash_init (&bucket->senders,
- offsetof (struct _hurd_cap_list_item, locp));
-
- err = hurd_table_init (&bucket->clients,
- sizeof (_hurd_cap_client_t));
- if (err)
- goto err_clients;
-
- hurd_ihash_init (&bucket->clients_reverse,
- offsetof (struct _hurd_cap_client, locp));
-
- /* Do not use asynchronous thread allocation by default. */
- bucket->is_worker_alloc_async = false;
- /* We have to leave bucket->worker_alloc uninitialized. That field
- and bucket->worker_alloc_state will be initialized if
- asynchronous worker thread allocation is used. */
-
- /* Finally, add the notify handler. */
- bucket->client_death_notify.notify_handler = _hurd_cap_client_death;
- bucket->client_death_notify.hook = bucket;
- hurd_task_death_notify_add (&bucket->client_death_notify);
-
- *r_bucket = bucket;
- return 0;
-
-#if 0
- /* Provided here in case more error cases are added. */
- hurd_table_destroy (&bucket->clients);
-#endif
-
- err_clients:
- pthread_cond_destroy (&bucket->cond);
- err_cond:
- pthread_mutex_destroy (&bucket->lock);
- err_lock:
- pthread_mutex_destroy (&bucket->client_cond_lock);
- err_client_cond_lock:
- pthread_cond_destroy (&bucket->client_cond);
- err_client_cond:
- free (bucket);
-
- return err;
-}
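
A caller-side counterpart, for illustration only: hurd_cap_bucket_create
(above) and hurd_cap_bucket_free (next hunk) are taken from this diff;
the failure branch and the name make_bucket are hypothetical.

  /* Create a bucket, and release it again if later setup fails.  */
  #include <errno.h>
  #include <hurd/cap-server.h>

  static error_t
  make_bucket (hurd_cap_bucket_t *r_bucket)
  {
    hurd_cap_bucket_t bucket;
    error_t err = hurd_cap_bucket_create (&bucket);
    if (err)
      return err;

    /* ... further setup; if it fails, the unused bucket must be freed ...  */
    if (0 /* hypothetical failure */)
      {
        hurd_cap_bucket_free (bucket);
        return ENOMEM;
      }

    *r_bucket = bucket;
    return 0;
  }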
diff --git a/libhurd-cap-server/bucket-free.c b/libhurd-cap-server/bucket-free.c
deleted file mode 100644
index 5793114..0000000
--- a/libhurd-cap-server/bucket-free.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/* bucket-free.c - Free a capability bucket.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <pthread.h>
-
-#include "cap-server-intern.h"
-
-
-/* Free the bucket BUCKET, which must not be used. */
-void
-hurd_cap_bucket_free (hurd_cap_bucket_t bucket)
-{
- /* FIXME: Add some assertions to ensure it is not used.
- Reintroduce _hurd_cap_client_try_destroy. */
- hurd_table_destroy (&bucket->clients);
- pthread_cond_destroy (&bucket->cond);
- pthread_mutex_destroy (&bucket->lock);
- pthread_mutex_destroy (&bucket->client_cond_lock);
- pthread_cond_destroy (&bucket->client_cond);
- free (bucket);
-}
-
diff --git a/libhurd-cap-server/bucket-inhibit.c b/libhurd-cap-server/bucket-inhibit.c
deleted file mode 100644
index b3484bf..0000000
--- a/libhurd-cap-server/bucket-inhibit.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/* bucket-inhibit.c - Inhibit RPCs on a capability bucket.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Inhibit all RPCs on the capability bucket BUCKET (which must not
- be locked). You _must_ follow up with a hurd_cap_bucket_resume
- operation, and hold at least one reference to the object
- continuously until you did so. */
-error_t
-hurd_cap_bucket_inhibit (hurd_cap_bucket_t bucket)
-{
- error_t err;
-
- pthread_mutex_lock (&bucket->lock);
-
- /* First wait until any other inhibitor has resumed the bucket. If
- this function is called within an RPC, we are going to be
- canceled anyway. Otherwise, it ensures that bucket inhibitions
- are fully serialized (per bucket). */
- /* FIXME: Do something if the state is _HURD_CAP_STATE_BLACK? Can
- only happen if we are called from outside any RPCs. */
- while (bucket->state != _HURD_CAP_STATE_GREEN)
- {
- err = hurd_cond_wait (&bucket->cond, &bucket->lock);
- if (err)
- {
- /* We have been canceled. */
- pthread_mutex_unlock (&bucket->lock);
- return err;
- }
- }
-
- /* Now it is our turn to inhibit the bucket. */
- bucket->cond_waiter = pthread_self ();
-
- if (_hurd_cap_bucket_cond_busy (bucket))
- {
- _hurd_cap_list_item_t pending_rpc = bucket->pending_rpcs;
-
- /* There are still pending RPCs (beside us). Cancel them. */
- while (pending_rpc)
- {
- if (pending_rpc->thread != bucket->cond_waiter)
- pthread_cancel (pending_rpc->thread);
- pending_rpc = pending_rpc->next;
- }
-
- /* Indicate that we would like to know when they have gone. */
- bucket->state = _HURD_CAP_STATE_YELLOW;
-
- /* The last one will shut the door. */
- do
- {
- err = hurd_cond_wait (&bucket->cond, &bucket->lock);
- if (err)
- {
- /* We have been canceled ourselves. Give up. */
- bucket->state = _HURD_CAP_STATE_GREEN;
- pthread_mutex_unlock (&bucket->lock);
- return err;
- }
- }
- while (bucket->state != _HURD_CAP_STATE_RED);
- }
- else
- bucket->state = _HURD_CAP_STATE_RED;
-
- /* Now all pending RPCs have been canceled and are completed (except
- us), and all incoming RPCs are inhibited. */
- pthread_mutex_unlock (&bucket->lock);
-
- return 0;
-}
-
-
-/* Resume RPCs on the bucket BUCKET and wake-up all waiters. */
-void
-hurd_cap_bucket_resume (hurd_cap_bucket_t bucket)
-{
- pthread_mutex_lock (&bucket->lock);
-
- bucket->state = _HURD_CAP_STATE_GREEN;
-
- /* Broadcast the change to all potential waiters. */
- pthread_cond_broadcast (&bucket->cond);
-
- pthread_mutex_unlock (&bucket->lock);
-}
-
-
-/* End management of the bucket BUCKET. */
-error_t
-hurd_cap_bucket_end (hurd_cap_bucket_t bucket, bool force)
-{
- pthread_mutex_lock (&bucket->lock);
-
- if (!force && bucket->nr_caps)
- {
- pthread_mutex_unlock (&bucket->lock);
- return EBUSY;
- }
-
- bucket->state = _HURD_CAP_STATE_BLACK;
-
- /* Broadcast the change to all potential waiters. */
- pthread_cond_broadcast (&bucket->cond);
-
- if (bucket->is_managed && bucket->cond_waiter != bucket->manager)
- pthread_cancel (bucket->manager);
-
- pthread_mutex_unlock (&bucket->lock);
-
- return 0;
-}
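
The comment on hurd_cap_bucket_inhibit above requires that every
successful inhibit is paired with hurd_cap_bucket_resume while a
reference is held.  A minimal sketch of that protocol, using only the
functions defined in this file; the maintenance step and the name
with_bucket_inhibited are placeholders.

  /* Inhibit all RPCs, do maintenance while nothing can run, resume.  */
  #include <hurd/cap-server.h>

  static error_t
  with_bucket_inhibited (hurd_cap_bucket_t bucket)
  {
    error_t err = hurd_cap_bucket_inhibit (bucket);
    if (err)
      /* Canceled while waiting; the bucket was not inhibited.  */
      return err;

    /* ... maintenance: no RPCs on this bucket are processed here ...  */

    /* Mandatory follow-up: wake all waiters and accept RPCs again.  */
    hurd_cap_bucket_resume (bucket);
    return 0;
  }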
diff --git a/libhurd-cap-server/bucket-inject.c b/libhurd-cap-server/bucket-inject.c
deleted file mode 100644
index 0a81b27..0000000
--- a/libhurd-cap-server/bucket-inject.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/* bucket-inject.c - Copy out a capability to a client.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Copy out a capability for the capability OBJ to the client with the
- task ID TASK_ID. Returns the capability (valid only for this user)
- in *R_CAP, or an error. It is not safe to call this from outside
- an RPC on OBJ while the manager is running. */
-error_t
-hurd_cap_bucket_inject (hurd_cap_bucket_t bucket, hurd_cap_obj_t obj,
- hurd_task_id_t task_id, hurd_cap_handle_t *r_cap)
-{
- error_t err;
- _hurd_cap_client_t client;
- _hurd_cap_id_t cap_id;
-
- err = _hurd_cap_client_create (bucket, task_id, &client);
- if (err)
- return err;
-
- pthread_mutex_lock (&obj->lock);
- err = _hurd_cap_obj_copy_out (obj, bucket, client, &cap_id);
- pthread_mutex_unlock (&obj->lock);
- _hurd_cap_client_release (bucket, client->id);
- if (err)
- return err;
-
- *r_cap = _hurd_cap_handle_make (client->id, cap_id);
- return 0;
-}
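
hurd_cap_bucket_inject is how a server hands out a capability handle
outside of an RPC, for example to its first client at bootstrap.  A
usage sketch: how the resulting handle reaches the client is outside
this library, and the name grant_initial_cap is hypothetical.

  /* Grant CLIENT_TASK a handle on OBJ, e.g. before the manager starts.
     Per the comment above, this is not safe from outside an RPC on OBJ
     while the manager is running.  */
  #include <hurd/cap-server.h>

  static error_t
  grant_initial_cap (hurd_cap_bucket_t bucket, hurd_cap_obj_t obj,
                     hurd_task_id_t client_task, hurd_cap_handle_t *r_cap)
  {
    return hurd_cap_bucket_inject (bucket, obj, client_task, r_cap);
  }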
diff --git a/libhurd-cap-server/bucket-manage-mt.c b/libhurd-cap-server/bucket-manage-mt.c
deleted file mode 100644
index 15de1f1..0000000
--- a/libhurd-cap-server/bucket-manage-mt.c
+++ /dev/null
@@ -1,1116 +0,0 @@
-/* bucket-manage-mt.c - Manage RPCs on a bucket.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <stdint.h>
-#include <pthread.h>
-
-#include <l4.h>
-#include <compiler.h>
-
-#include "cap-server-intern.h"
-
-
-/* When using propagation, the from thread ID returned can differ from
- the one we used for the closed receive. */
-#define l4_xreceive_timeout(from,timeout,fromp) \
- (l4_ipc (l4_nilthread, from, timeout, fromp))
-#define l4_xreceive(from,fromp) \
- l4_xreceive_timeout (from, l4_timeouts (L4_ZERO_TIME, L4_NEVER), fromp)
-
-
-/* FIXME: Throughout this file, for debugging, the behaviour could be
- relaxed to return errors to callers which would otherwise be
- ignored (due to malformed requests etc). */
-
-
-/* FIXME: This section contains a lot of random junk that maybe should
- be somewhere else (or not). */
-
-/* Cancel the pending RPC of the specified thread. */
-#define HURD_CAP_MSG_LABEL_CANCEL 0x100
-#define HURD_CAP_MSG_LABEL_GET_ROOT 0x101
-
-/* Some error for this. */
-#define ECAP_NOREPLY 0x10001
-#define ECAP_DIED 0x10002
-
-
-/* The msg labels of the reply from the worker to the manager. */
-#define _HURD_CAP_MSG_WORKER_ACCEPTED 0
-#define _HURD_CAP_MSG_WORKER_REJECTED 1
-
-struct worker_info
-{
- /* The bucket. */
- hurd_cap_bucket_t bucket;
-
- /* The manager thread. */
- l4_thread_id_t manager_tid;
-
- /* The timeout for the worker thread as an L4 time period. */
- l4_time_t timeout;
-};
-
-
-static void
-__attribute__((always_inline))
-reply_err (l4_thread_id_t to, error_t err)
-{
-#define HURD_L4_ERROR_LABEL ((uint16_t) INT16_MIN)
-#define HURD_L4_ERROR_TAG ((HURD_L4_ERROR_LABEL << 16) & 1)
- l4_msg_tag_t tag = HURD_L4_ERROR_TAG;
-
- l4_set_msg_tag (tag);
- l4_load_mr (1, err);
- l4_reply (to);
-}
-
-
-/* Lookup the client with client ID CLIENT_ID and return it in
- *R_CLIENT with one reference for the entry in the bucket. It is
- verified that the client is in fact the one with the task ID
- TASK_ID. */
-static error_t
-__attribute__((always_inline))
-lookup_client (hurd_cap_bucket_t bucket, _hurd_cap_client_id_t client_id,
- hurd_task_id_t task_id, _hurd_cap_client_t *r_client)
-{
- error_t err = 0;
- _hurd_cap_client_t *clientp;
-
- pthread_mutex_lock (&bucket->lock);
- /* Look up the client by its ID. */
- clientp = hurd_table_lookup (&bucket->clients, client_id);
- if (!clientp || (*clientp)->dead || (*clientp)->task_id != task_id)
- err = ECAP_NOREPLY;
- else
- {
- (*clientp)->refs++;
- *r_client = *clientp;
- }
- pthread_mutex_unlock (&bucket->lock);
-
- return err;
-}
-
-
-/* Process the message CTX->MSG from thread CTX->FROM in worker thread
- CTX->WORKER of bucket CTX->BUCKET. Return ECAP_NOREPLY if no reply
- should be sent. Any other error will be replied to the user. If 0
- is returned, CTX->MSG must contain the reply message. */
-static error_t
-__attribute__((always_inline))
-manage_demuxer (hurd_cap_rpc_context_t ctx, _hurd_cap_list_item_t worker)
-{
- error_t err = 0;
- hurd_cap_bucket_t bucket = ctx->bucket;
- _hurd_cap_client_t client;
- hurd_cap_class_t cap_class;
- hurd_cap_obj_t obj;
- _hurd_cap_obj_entry_t obj_entry;
- struct _hurd_cap_list_item worker_client;
- struct _hurd_cap_list_item worker_class;
- struct _hurd_cap_list_item worker_obj;
-
- worker_client.thread = worker->thread;
- worker_client.tid = worker->tid;
- worker_client.next = NULL;
- worker_client.prevp = NULL;
-
- worker_class = worker_client;
- worker_obj = worker_client;
-
- if (l4_msg_label (ctx->msg) == HURD_CAP_MSG_LABEL_GET_ROOT)
- {
- /* This is the "get the root capability" RPC. FIXME: Needs to
- be implemented. */
- return ENOSYS;
- }
-
- /* Every normal RPC must have at least one untyped word, which
- contains the client and capability ID. Otherwise the message is
- malformed, and thus ignored. */
- if (l4_untyped_words (l4_msg_msg_tag (ctx->msg)) < 1)
- return ECAP_NOREPLY;
- ctx->handle = l4_msg_word (ctx->msg, 0);
-
- err = lookup_client (bucket, _hurd_cap_client_id (ctx->handle),
- ctx->sender, &client);
- if (err)
- return err;
-
- /* At this point, CLIENT_ID and CLIENT are valid, and we have one
- reference for the client. */
-
- pthread_mutex_lock (&client->lock);
- /* First, we have to check if the class is inhibited, and if it is,
- we have to wait until it is uninhibited. */
- if (EXPECT_FALSE (client->state == _HURD_CAP_STATE_BLACK))
- err = ECAP_NOREPLY;
- else if (EXPECT_FALSE (client->state != _HURD_CAP_STATE_GREEN))
- {
- pthread_mutex_unlock (&client->lock);
- pthread_mutex_lock (&bucket->client_cond_lock);
- pthread_mutex_lock (&client->lock);
- while (!err && client->state != _HURD_CAP_STATE_GREEN)
- {
- if (client->state == _HURD_CAP_STATE_BLACK)
- err = ECAP_NOREPLY;
- else
- {
- pthread_mutex_unlock (&client->lock);
- err = hurd_cond_wait (&bucket->client_cond,
- &bucket->client_cond_lock);
- pthread_mutex_lock (&client->lock);
- }
- }
- pthread_mutex_unlock (&bucket->client_cond_lock);
- }
- if (err)
- {
- pthread_mutex_unlock (&client->lock);
- /* Either the client died, or we have been canceled. */
- _hurd_cap_client_release (bucket, client->id);
- return err;
- }
-
- {
- _hurd_cap_obj_entry_t *entry;
-
- entry = (_hurd_cap_obj_entry_t *)
- hurd_table_lookup (&client->caps, _hurd_cap_id (ctx->handle));
- if (!entry)
- err = ECAP_NOREPLY;
- else
- {
- obj_entry = *entry;
-
- if (EXPECT_FALSE (!obj_entry->external_refs))
- err = ECAP_NOREPLY;
- else if (EXPECT_FALSE (obj_entry->dead))
- err = ECAP_DIED;
- else
- {
- obj_entry->internal_refs++;
- obj = obj_entry->cap_obj;
- }
- }
- }
- if (err)
- {
- /* Either the capability ID is invalid, or it was revoked. */
- pthread_mutex_unlock (&client->lock);
- _hurd_cap_client_release (bucket, client->id);
- return err;
- }
-
- /* At this point, CAP_ID, OBJ_ENTRY and OBJ are valid. We have one
- internal reference for the capability entry. */
-
- /* Add ourself to the pending_rpcs list of the client. */
- _hurd_cap_list_item_add (&client->pending_rpcs, &worker_client);
- pthread_mutex_unlock (&client->lock);
-
- cap_class = obj->cap_class;
-
- pthread_mutex_lock (&cap_class->lock);
- /* First, we have to check if the class is inhibited, and if it is,
- we have to wait until it is uninhibited. */
- while (!err && cap_class->state != _HURD_CAP_STATE_GREEN)
- err = hurd_cond_wait (&cap_class->cond, &cap_class->lock);
- if (err)
- {
- /* Canceled. */
- pthread_mutex_unlock (&cap_class->lock);
- goto client_cleanup;
- }
-
- _hurd_cap_list_item_add (&cap_class->pending_rpcs, &worker_class);
- pthread_mutex_unlock (&cap_class->lock);
-
-
- pthread_mutex_lock (&obj->lock);
- /* First, we have to check if the object is inhibited, and if it is,
- we have to wait until it is uninhibited. */
- if (obj->state != _HURD_CAP_STATE_GREEN)
- {
- pthread_mutex_unlock (&obj->lock);
- pthread_mutex_lock (&cap_class->obj_cond_lock);
- pthread_mutex_lock (&obj->lock);
- while (!err && obj->state != _HURD_CAP_STATE_GREEN)
- {
- pthread_mutex_unlock (&obj->lock);
- err = hurd_cond_wait (&cap_class->obj_cond,
- &cap_class->obj_cond_lock);
- pthread_mutex_lock (&obj->lock);
- }
- pthread_mutex_unlock (&cap_class->obj_cond_lock);
- }
- if (err)
- {
- /* Canceled. */
- pthread_mutex_unlock (&obj->lock);
- goto class_cleanup;
- }
-
- /* Now check if the client still has the capability, or if it was
- revoked. */
- pthread_mutex_lock (&client->lock);
- if (obj_entry->dead)
- err = ECAP_DIED;
- pthread_mutex_unlock (&client->lock);
- if (err)
- {
- /* The capability was revoked in the meantime. */
- pthread_mutex_unlock (&obj->lock);
- goto class_cleanup;
- }
- _hurd_cap_list_item_add (&obj->pending_rpcs, &worker_obj);
-
- /* At this point, we have looked up the capability, acquired an
- internal reference for its entry in the client table (which
-     implicitly keeps a reference acquired for the object itself),
- acquired a reference for the capability client in the bucket, and
- have added an item to the pending_rpcs lists in the client, class
- and object. The object is locked. With all this, we can finally
- start to process the message for real. */
-
- /* FIXME: Call the internal demuxer here, for things like reference
- counter modification, cap passing etc. */
-
- /* Invoke the class-specific demuxer. */
- ctx->client = client;
- ctx->obj = obj;
- err = (*cap_class->demuxer) (ctx);
-
- /* Clean up. OBJ is still locked. */
- _hurd_cap_list_item_remove (&worker_obj);
- _hurd_cap_obj_cond_check (obj);
-
-  /* Instead of releasing the lock for the object, we hold it until
- manage_demuxer_cleanup is called. This is important, because the
- object must be locked until the reply message is sent. Consider
- the impact of map items or string items. FIXME: Alternatively,
- let the user set a flag if the object is locked upon return (and
-     must be kept locked continuously until the reply is sent).  OTOH,
- releasing a lock just to take it again is also pretty useless.
- Needs performance measurements to make a good decision. */
- hurd_cap_obj_ref (obj);
-
- class_cleanup:
- pthread_mutex_lock (&cap_class->lock);
- _hurd_cap_list_item_remove (&worker_class);
- _hurd_cap_class_cond_check (cap_class);
- pthread_mutex_unlock (&cap_class->lock);
-
- client_cleanup:
- pthread_mutex_lock (&client->lock);
- _hurd_cap_list_item_remove (&worker_client);
- _hurd_cap_client_cond_check (bucket, client);
-
- /* You are not allowed to revoke a capability while there are
- pending RPCs on it. This is the reason why we know that there
- must be at least one extra internal reference. FIXME: For
- cleanliness, this could still call some inline function that does
- the decrement. The assert can be a hint to the compiler to
- optimize the inline function expansion anyway. */
- assert (!obj_entry->dead);
- assert (obj_entry->internal_refs > 1);
- obj_entry->internal_refs--;
- pthread_mutex_unlock (&client->lock);
-
- _hurd_cap_client_release (bucket, client->id);
-
- return err;
-}
-
-
-static void
-__attribute__((always_inline))
-manage_demuxer_cleanup (hurd_cap_rpc_context_t ctx)
-{
- hurd_cap_obj_drop (ctx->obj);
-}
-
-
-/* A worker thread for RPC processing. The behaviour of this function
- is tightly integrated with the behaviour of the manager thread. */
-static void *
-manage_mt_worker (void *arg, bool async)
-{
- struct worker_info *info = (struct worker_info *) arg;
- hurd_cap_bucket_t bucket = info->bucket;
- struct _hurd_cap_list_item worker_item;
- _hurd_cap_list_item_t worker = &worker_item;
- l4_thread_id_t manager = info->manager_tid;
- l4_time_t timeout = info->timeout;
- l4_thread_id_t from;
- l4_msg_tag_t msg_tag;
- bool current_worker_is_us;
-
- /* Prepare the worker queue item. [SYNC: As we are always the
- current worker thread when we are started up, we do not add
- ourselves to the free list.] */
- worker->thread = pthread_self ();
- worker->tid = l4_myself ();
- worker->next = NULL;
- worker->prevp = NULL;
-
- if (EXPECT_FALSE (async))
- {
- /* We have to add ourselves to the free list and inform the
- worker_alloc_async thread. */
- pthread_mutex_lock (&bucket->lock);
-
- if (bucket->is_manager_waiting && !bucket->free_worker)
- {
- /* The manager is starving for worker threads. */
- pthread_cond_broadcast (&bucket->cond);
- }
- _hurd_cap_list_item_add (&bucket->free_worker, worker);
-
- /* Notify the worker_alloc_async thread that we have started up
- and added ourselves to the free list. */
- bucket->worker_alloc_state = _HURD_CAP_STATE_RED;
-
- /* This will wake up the worker_alloc_async thread, but also the
- manager in case it is blocked on getting a new worker
- thread. */
- pthread_cond_broadcast (&bucket->cond);
- pthread_mutex_unlock (&bucket->lock);
-
- /* We do not know if we will be the current worker thread or
- not, so we must wait with a timeout. */
- msg_tag = l4_xreceive_timeout (manager, timeout, &from);
- }
- else
- {
- /* When we are started up, we are supposed to listen as soon as
- possible to the next incoming message. When we know we are the
- current worker thread, we do this without a timeout. */
- msg_tag = l4_xreceive (manager, &from);
- }
-
- while (1)
- {
- if (EXPECT_FALSE (l4_ipc_failed (msg_tag)))
- {
- /* Slow path. */
-
- l4_word_t err_code = l4_error_code ();
- l4_word_t ipc_err = (err_code >> 1) & 0x7;
-
- if (ipc_err == L4_IPC_CANCELED || ipc_err == L4_IPC_ABORTED)
- /* We have been canceled for shutdown. */
- break;
-
- /* The only other error that can happen is a timeout waiting
- for the message. */
- assert (ipc_err == L4_IPC_TIMEOUT);
-
- pthread_mutex_lock (&bucket->lock);
- /* If we are not on the free queue, we are the current worker. */
- current_worker_is_us = _hurd_cap_list_item_dequeued (worker);
- pthread_mutex_unlock (&bucket->lock);
-
- /* If we are not the current worker, then we can just exit
- now because of our timeout. */
- if (!current_worker_is_us)
- break;
-
- /* If we are the current worker, we should wait here for the
- next message without a timeout. */
- msg_tag = l4_xreceive (manager, &from);
- /* From here, we will loop all over to the beginning of the
- while(1) block. */
- }
- else
- {
- /* Fast path. Process the RPC. */
- error_t err = 0;
- struct hurd_cap_rpc_context ctx;
- bool inhibited = false;
-
- /* IMPORTANT NOTE: The manager thread is blocked until we
- reply a message with a label MSG_ACCEPTED or
- MSG_REJECTED. We are supposed to return such a message
- as quickly as possible. In the accepted case, we should
- then process the message, while in the rejected case we
- should rapidly go into the next receive. */
-
- /* Before we can work on the message, we need to copy it.
- This is because the MRs holding the message might be
- overridden by the pthread implementation or other
- function calls we make. In particular,
-             pthread_mutex_lock can mangle the message buffer.  */
- l4_msg_store (msg_tag, ctx.msg);
-
- assert (l4_ipc_propagated (msg_tag));
- assert (l4_is_thread_equal (l4_actual_sender (), manager));
-
- pthread_mutex_lock (&bucket->lock);
- /* We process cancellation messages regardless of the
- bucket state. */
- if (EXPECT_FALSE (l4_msg_label (ctx.msg)
- == HURD_CAP_MSG_LABEL_CANCEL))
- {
- if (l4_untyped_words (l4_msg_msg_tag (ctx.msg)) == 1)
- {
- l4_thread_id_t tid = l4_msg_word (ctx.msg, 0);
-
- /* First verify access. Threads are only allowed to
- cancel RPCs from other threads in the task. */
- if (hurd_task_id_from_thread_id (tid)
- == hurd_task_id_from_thread_id (from))
- {
- /* We allow cancel requests even if normal RPCs
- are inhibited. */
- _hurd_cap_list_item_t pending_worker;
-
- pending_worker = hurd_ihash_find (&bucket->senders,
- tid);
- if (!pending_worker)
- reply_err (from, ESRCH);
- else
- {
- /* Found it. Cancel it. */
- pthread_cancel (pending_worker->thread);
- /* Reply success. */
- reply_err (from, 0);
- }
- }
- }
- /* Set the error variable so that we return to the
- manager immediately. */
- err = ECAP_NOREPLY;
- }
- else
- {
- /* Normal RPCs. */
- if (EXPECT_FALSE (bucket->state == _HURD_CAP_STATE_BLACK))
- {
- /* The bucket operations have been ended, and the
- manager has already been canceled. We know that
- the BUCKET->senders hash is empty, so we can
- quickly process the message. */
-
- /* This is a normal RPC. We cancel it immediately. */
- reply_err (from, ECANCELED);
-
- /* Now set ERR to any error, so we return to the
- manager. */
- err = ECAP_NOREPLY; /* Doesn't matter which error. */
- }
- else
- {
- if (EXPECT_FALSE (bucket->state != _HURD_CAP_STATE_GREEN))
- {
- /* If we are inhibited, we will have to wait
- until we are uninhibited. */
- inhibited = true;
- }
-
- /* FIXME: This is inefficient. ihash should support
- an "add if not there" function. */
- if (EXPECT_FALSE (hurd_ihash_find (&bucket->senders, from)))
- err = EBUSY;
- else
- {
- /* FIXME: We know intimately that pthread_self is not
- _HURD_IHASH_EMPTY or _HURD_IHASH_DELETED. */
- err = hurd_ihash_add (&bucket->senders, from, worker);
- }
- }
- }
-
- if (EXPECT_FALSE (err))
- {
- pthread_mutex_unlock (&bucket->lock);
-
- /* Either we already processed the message above, or
- this user thread is currently in an RPC. We don't
-                 allow asynchronous operation for security reasons
- (preventing DoS attacks). Silently drop the
- message. */
- msg_tag = l4_niltag;
- l4_set_msg_label (&msg_tag, _HURD_CAP_MSG_WORKER_REJECTED);
- l4_load_mr (0, msg_tag);
-
- /* Reply to the manager that we don't accept the message
- and wait for the next message without a timeout
- (because now we know we are the current worker). */
- from = manager;
- msg_tag = l4_lreply_wait (manager, &from);
-
- /* From here, we will loop all over to the beginning of
- the while(1) block. */
- }
- else
- {
- _hurd_cap_list_item_add (inhibited ? &bucket->waiting_rpcs
- : &bucket->pending_rpcs, worker);
- pthread_mutex_unlock (&bucket->lock);
-
- msg_tag = l4_niltag;
- l4_set_msg_label (&msg_tag, _HURD_CAP_MSG_WORKER_ACCEPTED);
- l4_load_mr (0, msg_tag);
- msg_tag = l4_reply (manager);
- assert (l4_ipc_succeeded (msg_tag));
-
- /* Now we are "detached" from the manager in the sense
- that we are not the current worker thread
- anymore. */
-
- if (EXPECT_FALSE (inhibited))
- {
- pthread_mutex_lock (&bucket->lock);
- while (!err && bucket->state != _HURD_CAP_STATE_GREEN
- && bucket->state != _HURD_CAP_STATE_BLACK)
- err = hurd_cond_wait (&bucket->cond, &bucket->lock);
- if (!err)
- {
- if (bucket->state == _HURD_CAP_STATE_BLACK)
- err = ECANCELED;
- else
- {
- /* State is _HURD_CAP_STATE_GREEN. Move
- ourselves to the pending RPC list. */
- _hurd_cap_list_item_remove (worker);
- _hurd_cap_list_item_add (&bucket->pending_rpcs,
- worker);
- }
- }
- pthread_mutex_unlock (&bucket->lock);
- }
-
- if (EXPECT_TRUE (!err))
- {
- /* Process the message. */
- ctx.sender = hurd_task_id_from_thread_id (from);
- ctx.bucket = bucket;
- ctx.from = from;
- ctx.obj = NULL;
- err = manage_demuxer (&ctx, worker);
- }
-
- /* Post-processing. */
-
- pthread_mutex_lock (&bucket->lock);
- /* We have to add ourselves to the free list before (or
- at the same time) as removing the client from the
- pending hash, and before replying to the RPC (if we
- reply in the worker thread at all). The former is
- necessary to make sure that no new thread is created
- in the race that would otherwise exist, namely after
- replying and before adding ourself to the free list.
- The latter is required because a client that
- immediately follows up with a new message of course
- can expect that to work properly. */
-
- if (EXPECT_FALSE (bucket->is_manager_waiting
- && !bucket->free_worker))
- {
- /* The manager is starving for worker threads. */
- pthread_cond_broadcast (&bucket->cond);
- }
-
- /* Remove from pending_rpcs (or waiting_rpcs) list. */
- _hurd_cap_list_item_remove (worker);
- /* The last waiting RPC may have to signal the manager. */
- if (EXPECT_FALSE (inhibited
- && bucket->state == _HURD_CAP_STATE_BLACK
- && !bucket->waiting_rpcs))
- pthread_cond_broadcast (&bucket->cond);
- _hurd_cap_list_item_add (&bucket->free_worker, worker);
-
- _hurd_cap_bucket_cond_check (bucket);
-
- /* Now that we are back on the free list it is safe to
- let in the next RPC by this thread. */
- hurd_ihash_locp_remove (&bucket->senders, worker->locp);
-
- /* FIXME: Reap the cancellation flag here. If it was
- set, we have been effectively unblocked now. From
- now on, canceling us means something different than
- cancelling a pending RPC (it means terminating the
- worker thread). */
-
- pthread_mutex_unlock (&bucket->lock);
-
- /* Finally, return the reply message, if appropriate. */
- if (EXPECT_TRUE (err != ECAP_NOREPLY))
- {
- if (EXPECT_FALSE (err))
- reply_err (from, err);
- else
- {
- /* We must make sure the message tag is set. */
- l4_msg_tag_t tag = l4_msg_msg_tag (ctx.msg);
- l4_clear_propagation (&tag);
- l4_set_msg_msg_tag (ctx.msg, tag);
- l4_msg_load (ctx.msg);
- l4_reply (from);
- }
- }
-
- if (ctx.obj)
- manage_demuxer_cleanup (&ctx);
-
- /* Now listen for the next message, with a timeout. */
- from = manager;
- msg_tag = l4_xreceive_timeout (manager, timeout, &from);
-
- /* From here, we will loop to the beginning of the
- while(1) block. */
- }
- }
- }
-
- /* At this point, we have been canceled while being on the free
- list, so we should go away. */
-
- pthread_mutex_lock (&bucket->lock);
- if (_hurd_cap_list_item_dequeued (worker))
- {
- /* We are the current worker thread. We are the last worker
- thread the manager thread will cancel. */
- pthread_cond_broadcast (&bucket->cond);
- }
- else
- {
- _hurd_cap_list_item_remove (worker);
- if (bucket->is_manager_waiting && !bucket->free_worker)
- {
- /* The manager is shutting down. We are the last free
- worker (except for the current worker thread) to be
- canceled. */
- pthread_cond_broadcast (&bucket->cond);
- }
- }
- pthread_mutex_unlock (&bucket->lock);
-
- return NULL;
-}
-
-
-/* A worker thread for RPC processing. The behaviour of this function
- is tightly integrated with the behaviour of the manager thread. */
-static void *
-manage_mt_worker_sync (void *arg)
-{
- return manage_mt_worker (arg, false);
-}
-
-
-/* A worker thread for RPC processing. The behaviour of this function
- is tightly integrated with the behaviour of the manager thread. */
-static void *
-manage_mt_worker_async (void *arg)
-{
- return manage_mt_worker (arg, true);
-}
-
-
-/* Return the next free worker thread. If no free worker thread is
- available, create a new one. If that fails, block until one
- becomes free. If we are interrupted while blocking, return
- l4_nilthread. */
-static l4_thread_id_t
-manage_mt_get_next_worker (struct worker_info *info, pthread_t *worker_thread)
-{
- hurd_cap_bucket_t bucket = info->bucket;
- l4_thread_id_t worker = l4_nilthread;
- _hurd_cap_list_item_t worker_item;
-
- pthread_mutex_lock (&bucket->lock);
-
- if (EXPECT_FALSE (bucket->free_worker == NULL))
- {
- /* Slow path. Create a new thread and use that. */
- error_t err;
-
- pthread_mutex_unlock (&bucket->lock);
- worker = pthread_pool_get_np ();
- if (worker == l4_nilthread)
- err = EAGAIN;
- else
- {
- err = pthread_create_from_l4_tid_np (worker_thread, NULL,
- worker, manage_mt_worker_sync,
- info);
- /* Return the thread to the pool. */
- if (err)
- pthread_pool_add_np (worker);
- }
-
- if (!err)
- {
- pthread_detach (*worker_thread);
- return worker;
- }
- else
- {
- pthread_mutex_lock (&bucket->lock);
- if (!bucket->free_worker)
- {
- /* Creating a new thread failed. As a last resort, put
- ourself to sleep until we are woken up by the next
- free worker. Hopefully not all workers are blocking
- forever. */
-
- /* FIXME: To fix the case where all workers are blocking
- forever, cancel one (or more? all?) (random? oldest?)
- worker threads. Usually, that user will restart, but
- it will nevertheless allow us to make some (although
-                 slow) progress.  */
-
- /* The next worker thread that adds itself to the free
- list will broadcast the condition. */
- bucket->is_manager_waiting = true;
- do
- err = hurd_cond_wait (&bucket->cond, &bucket->lock);
- while (!err && !bucket->free_worker);
-
- if (err)
- {
- pthread_mutex_unlock (&bucket->lock);
- return l4_nilthread;
- }
- }
- }
- }
-
- /* Fast path. A worker thread is available. Remove it from the
- free list and use it. */
- worker_item = bucket->free_worker;
- _hurd_cap_list_item_remove (worker_item);
- pthread_mutex_unlock (&bucket->lock);
-
- *worker_thread = worker_item->thread;
- return worker_item->tid;
-}
-
-
-/* A worker thread for allocating new worker threads. Only used if
- asynchronous worker thread allocation is requested. This is only
- necessary (and useful) for physmem, to break out of a potential
- dead-lock with the task server. */
-static void *
-worker_alloc_async (void *arg)
-{
- struct worker_info *info = (struct worker_info *) arg;
- hurd_cap_bucket_t bucket = info->bucket;
- error_t err;
-
- pthread_mutex_lock (&bucket->lock);
- if (bucket->state == _HURD_CAP_STATE_BLACK)
- {
- pthread_mutex_unlock (&bucket->lock);
- return NULL;
- }
-
- while (1)
- {
- err = hurd_cond_wait (&bucket->cond, &bucket->lock);
- /* We ignore the error, as the only error that can occur is
- ECANCELED, and only if the bucket state has gone to black for
- shutdown. */
- if (bucket->state == _HURD_CAP_STATE_BLACK)
- break;
-
- if (bucket->worker_alloc_state == _HURD_CAP_STATE_GREEN)
- {
- l4_thread_id_t worker = l4_nilthread;
- pthread_t worker_thread;
-
- pthread_mutex_unlock (&bucket->lock);
-
- worker = pthread_pool_get_np ();
- if (worker == l4_nilthread)
- err = EAGAIN;
- else
- {
- err = pthread_create_from_l4_tid_np (&worker_thread, NULL,
- worker,
- manage_mt_worker_async,
- info);
- /* Return the thread to the pool. */
- if (err)
- pthread_pool_add_np (worker);
- }
-
- if (!err)
- {
- pthread_detach (worker_thread);
-
- pthread_mutex_lock (&bucket->lock);
- bucket->worker_alloc_state = _HURD_CAP_STATE_YELLOW;
- /* We ignore any error, as the only error that can occur
- is ECANCELED, and only if the bucket state goes to
- black for shutdown. But particularly in that case we
- want to wait until the thread has fully come up and
- entered the free list, so it's properly accounted for
- and will be canceled at shutdown by the manager. */
- while (bucket->worker_alloc_state == _HURD_CAP_STATE_YELLOW)
- err = hurd_cond_wait (&bucket->cond, &bucket->lock);
-
- /* Will be set by the started thread. */
- assert (bucket->worker_alloc_state == _HURD_CAP_STATE_RED);
- }
- else
- {
- pthread_mutex_lock (&bucket->lock);
- bucket->worker_alloc_state = _HURD_CAP_STATE_RED;
- }
-
- if (bucket->state == _HURD_CAP_STATE_BLACK)
- break;
- }
- }
-
- bucket->worker_alloc_state = _HURD_CAP_STATE_BLACK;
- pthread_mutex_unlock (&bucket->lock);
-
- return NULL;
-}
-
-
-
-/* Start managing RPCs on the bucket BUCKET. The ROOT capability
- object, which must be unlocked and have one reference throughout
- the whole time this function runs, is used for bootstrapping client
- connections. The GLOBAL_TIMEOUT parameter specifies the number of
- seconds until the manager times out (if there are no active users).
- The WORKER_TIMEOUT parameter specifies the number of seconds until
- each worker thread times out (if there are no RPCs processed by the
- worker thread).
-
- If this returns ECANCELED, then hurd_cap_bucket_end was called with
- the force flag being true while there were still active users. If
- this returns without any error, then the timeout expired, or
- hurd_cap_bucket_end was called without active users. */
-error_t
-hurd_cap_bucket_manage_mt (hurd_cap_bucket_t bucket,
- hurd_cap_obj_t root,
- unsigned int global_timeout_sec,
- unsigned int worker_timeout_sec)
-{
- error_t err;
- l4_time_t global_timeout;
- pthread_t worker_thread;
- l4_thread_id_t worker;
- struct worker_info info;
- _hurd_cap_list_item_t item;
-
- global_timeout = (global_timeout_sec == 0) ? L4_NEVER
- : l4_time_period (UINT64_C (1000000) * global_timeout_sec);
-
- info.bucket = bucket;
- info.manager_tid = l4_myself ();
- info.timeout = (worker_timeout_sec == 0) ? L4_NEVER
- : l4_time_period (UINT64_C (1000000) * worker_timeout_sec);
-
- /* We create the first worker thread ourselves, to catch any
- possible error at this stage and bail out properly if needed. */
- worker = pthread_pool_get_np ();
- if (worker == l4_nilthread)
- return EAGAIN;
- err = pthread_create_from_l4_tid_np (&worker_thread, NULL,
- worker, manage_mt_worker_sync, &info);
- if (err)
- {
- /* Return the thread to the pool. */
- pthread_pool_add_np (worker);
- return err;
- }
- pthread_detach (worker_thread);
-
- pthread_mutex_lock (&bucket->lock);
- if (bucket->is_worker_alloc_async)
- {
- /* Prevent creation of new worker threads initially. */
- bucket->worker_alloc_state = _HURD_CAP_STATE_RED;
-
- /* Asynchronous worker thread allocation is requested. */
- err = pthread_create (&bucket->worker_alloc, NULL,
- worker_alloc_async, &info);
-
- if (err)
- {
- /* Cancel the worker thread. */
- pthread_cancel (worker_thread);
- hurd_cond_wait (&bucket->cond, &bucket->lock);
- pthread_mutex_unlock (&bucket->lock);
- return err;
- }
- }
- bucket->manager = pthread_self ();
- bucket->is_managed = true;
- bucket->is_manager_waiting = false;
- pthread_mutex_unlock (&bucket->lock);
-
- while (1)
- {
- l4_thread_id_t from = l4_anythread;
- l4_msg_tag_t msg_tag;
-
- /* We never accept any map or grant items. FIXME: For now, we
- also do not accept any string buffer items. */
- l4_accept (L4_UNTYPED_WORDS_ACCEPTOR);
-
- /* Because we do not accept any string items, we do not actually
- need to set the Xfer timeouts. But this is what we want to set
- them to when we eventually do support string items. */
- l4_set_xfer_timeouts (l4_timeouts (L4_ZERO_TIME, L4_ZERO_TIME));
-
- /* FIXME: Make sure we have enabled deferred cancellation, and
- use an L4 ipc() stub that supports that. In fact, this must
- be true for most of the IPC operations in this file. */
- msg_tag = l4_wait_timeout (global_timeout, &from);
-
- if (EXPECT_FALSE (l4_ipc_failed (msg_tag)))
- {
- l4_word_t err_code = l4_error_code ();
-
- /* FIXME: We need a macro or inline function for that. */
- l4_word_t ipc_err = (err_code >> 1) & 0x7;
-
- /* There are two possible errors, cancellation or timeout.
- Any other error indicates a bug in the code. */
- if (ipc_err == L4_IPC_CANCELED || ipc_err == L4_IPC_ABORTED)
- {
- /* If we are canceled, then this means that our state is
- now _HURD_CAP_STATE_BLACK and we should end managing
- RPCs even if there are still active users. */
-
- pthread_mutex_lock (&bucket->lock);
- assert (bucket->state == _HURD_CAP_STATE_BLACK);
- err = ECANCELED;
- break;
- }
- else
- {
- assert (((err_code >> 1) & 0x7) == L4_IPC_TIMEOUT);
-
- pthread_mutex_lock (&bucket->lock);
- /* Check if we can time out safely. */
- if (bucket->state == _HURD_CAP_STATE_GREEN
- && !bucket->nr_caps && !bucket->pending_rpcs
- && !bucket->waiting_rpcs)
- {
- err = 0;
- break;
- }
- pthread_mutex_unlock (&bucket->lock);
- }
- }
- else
- {
- /* Propagate the message to the worker thread. */
- l4_set_propagation (&msg_tag);
- l4_set_virtual_sender (from);
- l4_set_msg_tag (msg_tag);
-
- /* FIXME: Make sure to use a non-cancellable l4_lcall that
- does preserve any pending cancellation flag for this
- thread. Alternatively, we can handle cancellation here
- (reply ECANCELED to user, and enter shutdown
-             sequence).  */
- msg_tag = l4_lcall (worker);
- assert (l4_ipc_succeeded (msg_tag));
-
- if (EXPECT_TRUE (l4_label (msg_tag)
- == _HURD_CAP_MSG_WORKER_ACCEPTED))
- {
- worker = manage_mt_get_next_worker (&info, &worker_thread);
- if (worker == l4_nilthread)
- {
-                  /* manage_mt_get_next_worker was canceled while
-                     waiting for a free worker.  In this case we
-                     have to terminate ourselves.  */
- err = hurd_cap_bucket_inhibit (bucket);
- assert (!err);
- hurd_cap_bucket_end (bucket, true);
-
- pthread_mutex_lock (&bucket->lock);
- err = ECANCELED;
- break;
- }
- }
- }
- }
-
- /* At this point, bucket->lock is held. Start the shutdown
- sequence. */
- assert (!bucket->pending_rpcs);
-
- /* First shutdown the allocator thread, if any. */
- if (bucket->is_worker_alloc_async)
- {
- pthread_cancel (bucket->worker_alloc);
- pthread_join (bucket->worker_alloc, NULL);
- }
-
- /* Now force all the waiting rpcs onto the free list. They will
- have noticed the state change to _HURD_CAP_STATE_BLACK already,
- we just have to block until the last one wakes us up. */
- while (bucket->waiting_rpcs)
- hurd_cond_wait (&bucket->cond, &bucket->lock);
-
- /* Cancel the free workers. */
- item = bucket->free_worker;
- while (item)
- {
- pthread_cancel (item->thread);
- item = item->next;
- }
-
- /* Request the condition to be broadcasted. */
- bucket->is_manager_waiting = true;
-
- while (bucket->free_worker)
- {
- /* We ignore cancellations at this point, because we are already
- shutting down. */
- hurd_cond_wait (&bucket->cond, &bucket->lock);
- }
-
- /* Now cancel the current worker, except if we were canceled while
- trying to get a new one (in which case there is no current
- worker). */
- if (worker != l4_nilthread)
- {
- pthread_cancel (worker_thread);
- hurd_cond_wait (&bucket->cond, &bucket->lock);
- }
-
- bucket->is_managed = false;
- pthread_mutex_unlock (&bucket->lock);
-
- return err;
-}
diff --git a/libhurd-cap-server/bucket-worker-alloc.c b/libhurd-cap-server/bucket-worker-alloc.c
deleted file mode 100644
index be1167d..0000000
--- a/libhurd-cap-server/bucket-worker-alloc.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/* bucket-worker-alloc.c - Set the worker allocation policy.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-
-#include "cap-server-intern.h"
-
-
-/* If ASYNC is true, allocate worker threads asynchronously whenever
-   the supply of worker threads is exhausted.  This is only actually
-   required for physmem (the physical memory server), to allow it to
-   break out of a dead-lock between physmem and the task server.  It
-   should be unnecessary for any other server.
-
-   The default is false, which means that worker threads are
-   allocated synchronously by the manager thread.
-
- This function should be called before the manager is started with
- hurd_cap_bucket_manage_mt. It is only used for the multi-threaded
- RPC manager. */
-error_t
-hurd_cap_bucket_worker_alloc (hurd_cap_bucket_t bucket, bool async)
-{
- pthread_mutex_lock (&bucket->lock);
- bucket->is_worker_alloc_async = async;
- pthread_mutex_unlock (&bucket->lock);
-
- return 0;
-}
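
In practice only physmem would flip this switch; a minimal sketch, assuming the bucket and root object have already been set up as above:

/* Sketch: request asynchronous worker allocation, then start managing.  */
error_t err = hurd_cap_bucket_worker_alloc (bucket, true);
assert (!err);
err = hurd_cap_bucket_manage_mt (bucket, root, 0, 30);
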
diff --git a/libhurd-cap-server/cap-server-intern.h b/libhurd-cap-server/cap-server-intern.h
deleted file mode 100644
index 8d87116..0000000
--- a/libhurd-cap-server/cap-server-intern.h
+++ /dev/null
@@ -1,599 +0,0 @@
-/* cap-server-intern.h - Internal interface to the Hurd capability library.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef _HURD_CAP_SERVER_INTERN_H
-#define _HURD_CAP_SERVER_INTERN_H 1
-
-#include <hurd/types.h>
-#include <hurd/ihash.h>
-#include <hurd/cap-server.h>
-
-#include <compiler.h>
-
-#include "table.h"
-#include "task-death.h"
-
-
-/* FIXME: For now. */
-#define hurd_cond_wait pthread_cond_wait
-
-/* Every capability hurd_cap_handle_t consists of two parts: The upper
- part is a client ID and the lower part is a capability object ID.
- The client ID is as long as the task ID (which is as long as the
- version ID). The cap ID occupies the remainder. We intimately
- know that even on 64 bit architectures, both fit into a 32 bit
- integer value. */
-#define _HURD_CAP_CLIENT_ID_BITS HURD_TASK_ID_BITS
-#define _HURD_CAP_ID_BITS ((sizeof (hurd_cap_handle_t) * 8) \
- - HURD_TASK_ID_BITS)
-
-#define _HURD_CAP_CLIENT_ID_MASK \
- ((L4_WORD_C(1) << _HURD_CAP_CLIENT_ID_BITS) - 1)
-#define _HURD_CAP_ID_MASK ((L4_WORD_C(1) << _HURD_CAP_ID_BITS) - 1)
-
-typedef l4_uint32_t _hurd_cap_id_t;
-typedef l4_uint32_t _hurd_cap_client_id_t;
-
-
-/* Get the client ID from a user capability.  The client ID is an
-   index into the client table of the bucket.  */
-static inline _hurd_cap_client_id_t
-__attribute__((always_inline))
-_hurd_cap_client_id (hurd_cap_handle_t cap)
-{
- return cap >> _HURD_CAP_ID_BITS;
-}
-
-
-/* Get the capability ID from a user capability.  The capability ID
- is an index into the caps table of a client. */
-static inline _hurd_cap_id_t
-__attribute__((always_inline))
-_hurd_cap_id (hurd_cap_handle_t cap)
-{
- return cap & _HURD_CAP_ID_MASK;
-}
-
-
-/* Create a new capability handle from the client and cap ID. */
-static inline hurd_cap_handle_t
-__attribute__((always_inline))
-_hurd_cap_handle_make (_hurd_cap_client_id_t client_id, _hurd_cap_id_t cap_id)
-{
- return ((client_id & _HURD_CAP_CLIENT_ID_MASK) << _HURD_CAP_ID_BITS)
- | (cap_id & _HURD_CAP_ID_MASK);
-}
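
To illustrate the layout described above, a client ID and capability ID round-trip through a handle with the helpers defined here (the ID values below are made up for the example):

/* Sketch: pack a client ID and cap ID into a handle and split it again.  */
_hurd_cap_client_id_t client_id = 5;
_hurd_cap_id_t cap_id = 17;
hurd_cap_handle_t handle = _hurd_cap_handle_make (client_id, cap_id);

assert (_hurd_cap_client_id (handle) == client_id);
assert (_hurd_cap_id (handle) == cap_id);
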
-
-
-/* This is a simple list item, used to maintain lists of pending RPC
- worker threads in a class, client or capability object. */
-struct _hurd_cap_list_item
-{
- _hurd_cap_list_item_t next;
- _hurd_cap_list_item_t *prevp;
-
- union
- {
- /* Used for maintaining lists of pending RPC worker threads in a
- class, client or capability object. */
- struct
- {
- /* This location pointer is used for fast removal from the
- BUCKET->senders. */
- hurd_ihash_locp_t locp;
-
- /* The worker thread processing the RPC. */
- pthread_t thread;
-
- /* The worker thread L4 thread ID. */
- l4_thread_id_t tid;
- };
-
- /* Used for reverse lookup of capability clients using a
- capability object. */
- struct
- {
- _hurd_cap_client_t client;
- };
- };
-};
-
-/* Add the list item ITEM to the list LIST. */
-static inline void
-__attribute__((always_inline))
-_hurd_cap_list_item_add (_hurd_cap_list_item_t *list,
- _hurd_cap_list_item_t item)
-{
- if (*list)
- (*list)->prevp = &item->next;
- item->prevp = list;
- item->next = *list;
- *list = item;
-}
-
-
-/* Remove the list item ITEM from the list. */
-static inline void
-__attribute__((always_inline))
-_hurd_cap_list_item_remove (_hurd_cap_list_item_t item)
-{
- if (item->next)
- item->next->prevp = item->prevp;
- *(item->prevp) = item->next;
- item->prevp = NULL;
-}
-
-
-/* Check if the item ITEM is dequeued or not. */
-static inline bool
-__attribute__((always_inline))
-_hurd_cap_list_item_dequeued (_hurd_cap_list_item_t item)
-{
- return item->prevp == NULL;
-}
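
A minimal sketch of how these three helpers are used together; the list head and item are illustrative locals, not the real bucket fields:

/* Sketch: add an item to a list, test membership, remove it again.  */
_hurd_cap_list_item_t pending = NULL;   /* An empty list head.  */
struct _hurd_cap_list_item item;

item.prevp = NULL;                      /* Not on any list yet.  */
assert (_hurd_cap_list_item_dequeued (&item));

_hurd_cap_list_item_add (&pending, &item);
assert (!_hurd_cap_list_item_dequeued (&item));

_hurd_cap_list_item_remove (&item);     /* PREVP becomes NULL again.  */
assert (_hurd_cap_list_item_dequeued (&item));
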
-
-
-/* Deallocate the capability object OBJ, which must be locked and have
- no more references. */
-void _hurd_cap_obj_dealloc (hurd_cap_obj_t obj)
- __attribute__((visibility("hidden")));
-
-
-/* Remove one reference for the capability object OBJ, which must be
- locked, and will be unlocked when the function returns. If this
- was the last user of this object, the object is deallocated. */
-static inline void
-_hurd_cap_obj_drop (hurd_cap_obj_t obj)
-{
- if (EXPECT_TRUE (!atomic_decrement_and_test (&obj->refs)))
- hurd_cap_obj_unlock (obj);
- else
- _hurd_cap_obj_dealloc (obj);
-}
-
-
-/* Client capabilities. */
-
-/* The following data type is pointed to by an entry in the caps
-   table of a client.  Except where noted otherwise, its members are
- protected by the same lock as the table. */
-struct _hurd_cap_obj_entry
-{
- /* The capability object. */
- hurd_cap_obj_t cap_obj;
-
- /* The index in the capability table. */
- _hurd_cap_id_t id;
-
- /* A list item that is used for reverse lookup from the capability
- object to the client. Protected by the lock of the capability
- object. */
- struct _hurd_cap_list_item client_item;
-
- /* A flag that indicates if this capability is dead (revoked). Note
- that CAP_OBJ is still valid until all internal references have
- been removed. */
- unsigned int dead : 1;
-
- /* The number of internal references. These are references taken by
- the server for its own purpose. In fact, there is one such
- reference for all outstanding external references (if the dead
- flag is not set), and one for each pending RPC that uses this
- capability. If this reference counter drops to zero, the one
- real capability object reference held by this capability entry is
- released, and CAP_OBJ becomes invalid. */
- unsigned int internal_refs : 15;
-
- /* The number of external references. These are references granted
- to the user. For all these references, one internal reference is
- taken, unless the DEAD flag is set. */
- unsigned int external_refs : 16;
-};
-typedef struct _hurd_cap_obj_entry *_hurd_cap_obj_entry_t;
-
-
-/* The global slab for all capability entries. */
-extern struct hurd_slab_space _hurd_cap_obj_entry_space
- __attribute__((visibility("hidden")));
-
-/* Copy out a capability for the capability OBJ to the user CLIENT.
- Returns the capability ID (valid only for this user) in *R_ID, or
- an error. OBJ must be locked. */
-error_t _hurd_cap_obj_copy_out (hurd_cap_obj_t obj, hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client, _hurd_cap_id_t *r_id)
- __attribute__((visibility("hidden")));
-
-
-/* Client connections. */
-
-
-/* Instances of the following data type are pointed to by the client
- table of a bucket. Its members are protected by the same lock as
- the table. The data type holds all the information about a client
- connection. */
-struct _hurd_cap_client
-{
- /* A flag that indicates if this capability client is dead. This
- data structure is valid until all references have been removed,
- though. Note that because there is one reference for the task
- info capability, this means that CLIENT is valid until a task
- death notification has been processed for this client. Protected
- by the containing bucket's lock LOCK. */
- unsigned int dead : 1;
-
- /* Reference counter. A reference is held if the client is live
- (and removed by the task death handler). One reference is held
- by each pending RPC. Temporarily, additional references may be
-     held by RPCs that have just started, but they will drop them
-     again when they see the DEAD flag.  Protected by the containing
- bucket's lock LOCK. */
- unsigned int refs : 31;
-
- /* The task ID of the client. */
- hurd_task_id_t task_id;
-
- /* The index of the client in the client table of the bucket.
- This is here so that we can hash for the address of this struct
- in the clients_reverse hash of the bucket, and still get the
- index number. This allows us to use a location pointer for
- removal (locp) for fast hash removal. */
- _hurd_cap_client_id_t id;
-
- /* The location pointer for fast removal from the reverse lookup
- hash BUCKET->clients_reverse. This is protected by the bucket
- lock. */
- hurd_ihash_locp_t locp;
-
- /* The lock protecting all the following members. Client locks can
- be taken while capability object locks are held! This is very
- important when copying or removing capabilities. On the other
-     hand, this means you are not allowed to lock capability objects
- when holding a client lock. */
- pthread_mutex_t lock;
-
- /* The state of the client. If this is _HURD_CAP_STATE_GREEN, you
- can process RPCs for this client. Otherwise, you should drop
- RPCs for this client. If this is _HURD_CAP_STATE_YELLOW, and you
- are the last pending_rpc to finish, you have to broadcast the
- client_cond of the bucket. */
- enum _hurd_cap_state state;
-
- /* The current waiter thread. This is only valid if state is
- _HURD_CAP_STATE_YELLOW. Used by _hurd_cap_client_cond_busy (). */
- pthread_t cond_waiter;
-
- /* The pending RPC list. Each RPC worker thread should add itself
- to this list, so it can be cancelled by the task death
- notification handler. */
- struct _hurd_cap_list_item *pending_rpcs;
-
- /* The _hurd_cap_id_t to _hurd_cap_obj_entry_t mapping. */
- struct hurd_table caps;
-
- /* Reverse lookup from hurd_cap_obj_t to _hurd_cap_obj_entry_t. */
- struct hurd_ihash caps_reverse;
-};
-
-
-/* The global slab space for all capability clients. */
-extern struct hurd_slab_space _hurd_cap_client_space
- __attribute__((visibility("hidden")));
-
-
-/* Look up the client with the task ID TASK_ID in the bucket BUCKET,
-   and return it in R_CLIENT, with one additional reference.  If it is
-   not found, create it.  */
-error_t _hurd_cap_client_create (hurd_cap_bucket_t bucket,
- hurd_task_id_t task_id,
- _hurd_cap_client_t *r_client)
- __attribute__((visibility("hidden")));
-
-
-/* Deallocate the connection client CLIENT. */
-void _hurd_cap_client_dealloc (hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client)
- __attribute__((visibility("hidden")));
-
-
-/* Release a reference for the client with the ID IDX in the bucket
-   BUCKET.  */
-void _hurd_cap_client_release (hurd_cap_bucket_t bucket,
- _hurd_cap_client_id_t idx)
- __attribute__((visibility("hidden")));
-
-
-/* Inhibit all RPCs on the capability client CLIENT (which must not be
- locked) in the bucket BUCKET. You _must_ follow up with a
- hurd_cap_client_resume operation, and hold at least one reference
- to the object continuously until you did so. */
-error_t _hurd_cap_client_inhibit (hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client)
- __attribute__((visibility("hidden")));
-
-
-/* Resume RPCs on the capability client CLIENT in the bucket BUCKET
- and wake-up all waiters. */
-void _hurd_cap_client_resume (hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client)
- __attribute__((visibility("hidden")));
-
-
-/* End RPCs on the capability client CLIENT in the bucket BUCKET and
- wake-up all waiters. */
-void _hurd_cap_client_end (hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client)
- __attribute__((visibility("hidden")));
-
-
-/* Buckets are a set of capabilities, on which RPCs are managed
- collectively. */
-
-struct _hurd_cap_bucket
-{
- /* Client management. */
-
-  /* The following condition is used in conjunction with the state
-     predicate of the client associated with the currently processed
-     task death notification.  */
- pthread_cond_t client_cond;
-
- /* The following mutex is associated with the client_cond condition.
- Note that this mutex does _not_ protect the predicate of the
- condition: The predicate is the state of the respective client
- and that is protected by the lock of each client itself. In
- fact, this mutex has no purpose but to serve the condition. The
- reason is that this way we avoid lock contention when checking
- the state of a client.
-
- So, first you lock the client structure. You have to do this
- anyway. Then you check the state. If the state is
- _HURD_CAP_STATE_GREEN, you can unlock the client and continue
- normally. However, if the state is _HURD_CAP_STATE_YELLOW, you
- have to unlock the client, lock this mutex, then lock the client
- again and reinvestigate the state. If necessary (ie, you are the
- last RPC except the waiter) you can set the state to
- _HURD_CAP_STATE_RED and broadcast the condition. This sounds
- cumbersome, but the important part is that the common case, the
- _HURD_CAP_STATE_GREEN, is handled quickly and without class-wide
- lock contention. */
- pthread_mutex_t client_cond_lock;
-
- /* The following entry is protected by hurd_task_death_notify_lock. */
- struct hurd_task_death_notify_list_item client_death_notify;
-
-
- /* Bucket management. */
-
- /* The following members are protected by this lock. */
- pthread_mutex_t lock;
-
-  /* The manager thread for this bucket.  */
- pthread_t manager;
-
- /* True if MANAGER is valid and the bucket is managed. */
- bool is_managed;
-
- /* If this is true, then the manager is waiting for the free worker
-     list to become empty (at shutdown) or filled (otherwise).  The first
- worker thread to notice that the condition is fulfilled now
- should broadcast the condition. */
- bool is_manager_waiting;
-
- /* The state of the bucket. */
- _hurd_cap_state_t state;
-
- /* The condition used to wait on state changes and changes in the
- worker thread list. */
- pthread_cond_t cond;
-
- /* The thread waiting for the RPCs to be inhibited. */
- pthread_t cond_waiter;
-
- /* The number of capabilities. If this is not 0, then there are
- active users. */
- unsigned int nr_caps;
-
- /* The pending RPCs in this bucket. */
- _hurd_cap_list_item_t pending_rpcs;
-
- /* The waiting RPCs in this bucket. */
- _hurd_cap_list_item_t waiting_rpcs;
-
- /* The free worker threads in this bucket. */
- _hurd_cap_list_item_t free_worker;
-
- /* A hash from l4_thread_id_t to _hurd_cap_list_item_t (the list
- items in PENDING_RPCs). This is used to limit each client thread
- to just one RPC at one time. */
- struct hurd_ihash senders;
-
- /* Mapping from hurd_cap_client_id_t to _hurd_cap_client_t. */
- struct hurd_table clients;
-
- /* Reverse lookup from hurd_task_id_t to _hurd_cap_client_t. */
- struct hurd_ihash clients_reverse;
-
- /* This is true if worker threads should be allocated
- asynchronously. */
- bool is_worker_alloc_async;
-
- /* If WORKER_ALLOC_ASYNC is true, this is the state of the worker
- thread allocation thread. If this is _HURD_CAP_STATE_GREEN, then
- a new thread should be allocated. If this is
- _HURD_CAP_STATE_YELLOW, the worker thread has allocated a new
- thread, and is currently waiting for the thread to complete its
- startup. If this is _HURD_CAP_STATE_RED, the new worker thread
- has completed its startup (if one was started) and no new thread
- will be allocated. */
- _hurd_cap_state_t worker_alloc_state;
-
- /* If WORKER_ALLOC_ASYNC is true, this is the allocator thread. */
- pthread_t worker_alloc;
-};
-
-
-/* Return true if there are still outstanding RPCs in this bucket
- BUCKET, and fails if not. This is only valid if
- hurd_cap_bucket_inhibit is in progress (ie, if bucket->state is
- _HURD_CAP_STATE_YELLOW). BUCKET must be locked. */
-static inline int
-__attribute__((always_inline))
-_hurd_cap_bucket_cond_busy (hurd_cap_bucket_t bucket)
-{
- /* We have to remain in the state yellow until there are no pending
- RPC threads except maybe the waiter. */
- return bucket->pending_rpcs
- && (bucket->pending_rpcs->thread != bucket->cond_waiter
- || bucket->pending_rpcs->next);
-}
-
-
-/* Check if the inhibition state of the capability bucket BUCKET has
- to be changed. BUCKET must be locked. */
-static inline void
-__attribute__((always_inline))
-_hurd_cap_bucket_cond_check (hurd_cap_bucket_t bucket)
-{
- if (bucket->state == _HURD_CAP_STATE_YELLOW
- && !_hurd_cap_bucket_cond_busy (bucket))
- {
- bucket->state =_HURD_CAP_STATE_RED;
- pthread_cond_broadcast (&bucket->cond);
- }
-}
-
-
-/* Capability clients. */
-
-/* Return true if there are still outstanding RPCs in this capability
-   client, and false if not.  CLIENT must be locked.  This is only
- valid if hurd_cap_client_inhibit is in progress (ie, if
- client->state is _HURD_CAP_STATE_YELLOW). */
-static inline int
-__attribute__((always_inline))
-_hurd_cap_client_cond_busy (_hurd_cap_client_t client)
-{
- /* We have to remain in the state yellow until there are no pending
- RPC threads except maybe the waiter. */
- return client->pending_rpcs
- && (client->pending_rpcs->thread != client->cond_waiter
- || client->pending_rpcs->next);
-}
-
-
-/* Check if the inhibited state of the capability client CLIENT has to
- be changed. CLIENT must be locked. */
-static inline void
-__attribute__((always_inline))
-_hurd_cap_client_cond_check (hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client)
-{
- if (client->state == _HURD_CAP_STATE_YELLOW
- && !_hurd_cap_client_cond_busy (client))
- {
- client->state = _HURD_CAP_STATE_RED;
- pthread_cond_broadcast (&bucket->client_cond);
- }
-}
-
-
-/* Capability classes. */
-
-/* Return true if there are still outstanding RPCs in this class, and
-   false if not.  CAP_CLASS must be locked.  This is only valid if
- hurd_cap_class_inhibit is in progress (ie, if cap_class->state is
- _HURD_CAP_STATE_YELLOW). */
-static inline int
-__attribute__((always_inline))
-_hurd_cap_class_cond_busy (hurd_cap_class_t cap_class)
-{
- /* We have to remain in the state yellow until there are no pending
- RPC threads except maybe the waiter. */
- return cap_class->pending_rpcs
- && (cap_class->pending_rpcs->thread != cap_class->cond_waiter
- || cap_class->pending_rpcs->next);
-}
-
-
-/* Check if the inhibition state of the capability class CAP_CLASS has
- to be changed. CAP_CLASS must be locked. */
-static inline void
-__attribute__((always_inline))
-_hurd_cap_class_cond_check (hurd_cap_class_t cap_class)
-{
- if (cap_class->state == _HURD_CAP_STATE_YELLOW
- && !_hurd_cap_class_cond_busy (cap_class))
- {
- cap_class->state = _HURD_CAP_STATE_RED;
- pthread_cond_broadcast (&cap_class->cond);
- }
-}
-
-
-/* Capability objects. */
-
-/* Return true if there are still outstanding RPCs in this capability
-   object, and false if not.  OBJ must be locked.  This is only valid
- if hurd_cap_obj_inhibit is in progress (ie, if cap_obj->state is
- _HURD_CAP_STATE_YELLOW). */
-static inline int
-__attribute__((always_inline))
-_hurd_cap_obj_cond_busy (hurd_cap_obj_t obj)
-{
- /* We have to remain in the state yellow until there are no pending
- RPC threads except maybe the waiter. */
- return obj->pending_rpcs
- && (obj->pending_rpcs->thread != obj->cond_waiter
- || obj->pending_rpcs->next);
-}
-
-
-/* Check if the inhibition state of the capability object OBJ has to
-   be changed.  OBJ must be locked.  */
-static inline void
-__attribute__((always_inline))
-_hurd_cap_obj_cond_check (hurd_cap_obj_t obj)
-{
- if (obj->state == _HURD_CAP_STATE_YELLOW
- && !_hurd_cap_obj_cond_busy (obj))
- {
- obj->state = _HURD_CAP_STATE_RED;
- pthread_cond_broadcast (&obj->cap_class->cond);
- }
-}
-
-/* The following structure is used when using other capabilities in an
- RPC handler beside the one on which the RPC was invoked. */
-struct hurd_cap_ctx_cap_use
-{
- /* Private members. */
-
- _hurd_cap_obj_entry_t _obj_entry;
-
- /* The pending_rpc list item for the object's pending RPC list. */
- struct _hurd_cap_list_item _worker_obj;
-
- /* The pending_rpc list item for the object class' pending RPC list. */
- struct _hurd_cap_list_item _worker_class;
-};
-
-
-#endif /* _HURD_CAP_SERVER_INTERN_H */
diff --git a/libhurd-cap-server/cap-server.h b/libhurd-cap-server/cap-server.h
deleted file mode 100644
index e12e986..0000000
--- a/libhurd-cap-server/cap-server.h
+++ /dev/null
@@ -1,573 +0,0 @@
-/* cap-server.h - Server interface to the Hurd capability library.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef _HURD_CAP_SERVER_H
-#define _HURD_CAP_SERVER_H 1
-
-#include <stdbool.h>
-#include <errno.h>
-#include <pthread.h>
-
-/* FIXME: This is not a public header file! So we may have to ship
- a <hurd/atomic.h>. */
-#include <atomic.h>
-
-#include <hurd/slab.h>
-#include <hurd/types.h>
-
-
-/* Internal declarations. */
-
-/* This is a simple list item, used to maintain lists of pending RPC
- worker threads in a capability class, client, or object. */
-struct _hurd_cap_list_item;
-typedef struct _hurd_cap_list_item *_hurd_cap_list_item_t;
-
-
-/* The state of a capability class, client, or object. */
-typedef enum _hurd_cap_state
- {
- _HURD_CAP_STATE_GREEN,
- _HURD_CAP_STATE_YELLOW,
- _HURD_CAP_STATE_RED,
- _HURD_CAP_STATE_BLACK
- }
-_hurd_cap_state_t;
-
-
-/* Public interface. */
-
-/* Forward declarations. */
-struct _hurd_cap_bucket;
-typedef struct _hurd_cap_bucket *hurd_cap_bucket_t;
-struct _hurd_cap_client;
-typedef struct _hurd_cap_client *_hurd_cap_client_t;
-struct hurd_cap_class;
-typedef struct hurd_cap_class *hurd_cap_class_t;
-struct hurd_cap_obj;
-typedef struct hurd_cap_obj *hurd_cap_obj_t;
-
-
-typedef error_t (*hurd_cap_obj_init_t) (hurd_cap_class_t cap_class,
- hurd_cap_obj_t obj);
-typedef error_t (*hurd_cap_obj_alloc_t) (hurd_cap_class_t cap_class,
- hurd_cap_obj_t obj);
-typedef void (*hurd_cap_obj_reinit_t) (hurd_cap_class_t cap_class,
- hurd_cap_obj_t obj);
-typedef void (*hurd_cap_obj_destroy_t) (hurd_cap_class_t cap_class,
- hurd_cap_obj_t obj);
-
-
-/* The RPC context contains various information for the RPC handler
- and the support functions. */
-struct hurd_cap_rpc_context
-{
- /* Public members. */
-
- /* The task which contained the sender of the message. */
- hurd_task_id_t sender;
-
- /* The bucket through which the message was received. */
- hurd_cap_bucket_t bucket;
-
- /* The capability object on which the RPC was invoked. */
- hurd_cap_obj_t obj;
-
- /* The capability handle on which the RPC was invoked. */
- hurd_cap_handle_t handle;
-
- /* Private members. */
-
- /* The sender of the message. */
- l4_thread_id_t from;
-
- /* The client corresponding to FROM. */
- _hurd_cap_client_t client;
-
- /* The message. */
- l4_msg_t msg;
-};
-typedef struct hurd_cap_rpc_context *hurd_cap_rpc_context_t;
-
-/* FIXME: Add documentation. */
-typedef error_t (*hurd_cap_class_demuxer_t) (hurd_cap_rpc_context_t ctx);
-
-
-/* A capability class is a group of capability objects of the same
- type. */
-struct hurd_cap_class
-{
- /* Capability object management for the class. */
-
- /* The following callbacks are used to adjust the state of an object
- during its lifetime:
-
- 1. Object is constructed in the cache OBJ_INIT
- 2.1. Object is instantiated and removed from the free list OBJ_ALLOC
- 2.2. Object is deallocated and put back on the free list OBJ_REINIT
- 3. Object is destroyed and removed from the cache OBJ_DESTROY
-
- Note that step 2 can occur several times, or not at all.
- This is the state diagram for each object:
-
-     (START) --(1.)-> initialized --(3.)--> destroyed (END)
-                         |     ^
-                         |     |
-                       (2.1.) (2.2.)
-                         |     |
-                         v     |
-                        allocated
-
- Note that OBJ_INIT will be called in bursts for pre-allocation of
- several objects. */
-
- /* This callback is invoked whenever a new object is pre-allocated
- in the cache. It is usually called in bursts when a new slab
- page is allocated. You can put all initialization in it that
- should be cached. */
- hurd_cap_obj_init_t obj_init;
-
- /* This callback is called whenever an object in the cache is going
- to be instantiated and used. You can put further initialization
- in it that is not suitable for caching (for example, because it
- can not be safely reinitialized by OBJ_REINIT). If OBJ_ALLOC
- fails, then it must leave the object in its initialized
- state! */
- hurd_cap_obj_alloc_t obj_alloc;
-
- /* This callback is invoked whenever a used object is deallocated
- and returned to the cache. It should revert the used object to
- its initialized state, this means as if OBJ_INIT had been called
- on a freshly constructed object. This also means that you have
- to deallocate all resources that have been allocated by
- OBJ_ALLOC. Note that this function can not fail. Initialization
- that can not be safely (error-free) reverted to its original
- state must be put into the OBJ_ALLOC callback, rather than in the
- OBJ_INIT callback. */
- hurd_cap_obj_reinit_t obj_reinit;
-
- /* This callback is invoked whenever an initialized, but unused
- object is removed from the cache and destroyed. You should
- release all resources that have been allocated for this object by
- a previous OBJ_INIT invocation. */
- hurd_cap_obj_destroy_t obj_destroy;
-
- /* The slab space containing the capabilities in this class. */
- struct hurd_slab_space obj_space;
-
- /* The following condition is used in conjunction with the state
- predicate of a capability object. */
- pthread_cond_t obj_cond;
-
- /* The following mutex is associated with the obj_cond
- condition. Note that this mutex does _not_ protect the predicate
- of the condition: The predicate is the state of the respective
- client and that is protected by the lock of each capability
- object itself. */
- pthread_mutex_t obj_cond_lock;
-
-
- /* The class management. */
-
- /* The demuxer for this class. */
- hurd_cap_class_demuxer_t demuxer;
-
- /* The lock protecting all the following members. */
- pthread_mutex_t lock;
-
- /* The state of the class. */
- _hurd_cap_state_t state;
-
- /* The condition used for waiting on state changes. The associated
- mutex is LOCK. */
- pthread_cond_t cond;
-
- /* The current waiter thread. This is only valid if state is
- _HURD_CAP_STATE_YELLOW. Used by _hurd_cap_class_cond_busy (). */
- pthread_t cond_waiter;
-
- /* The pending RPC worker threads for this class. */
- _hurd_cap_list_item_t pending_rpcs;
-};
-
-
-/* Server-side objects that are accessible via capabilities. */
-struct hurd_cap_obj
-{
- /* The class which contains this capability. */
- hurd_cap_class_t cap_class;
-
- /* The lock protecting all the members of the capability object. */
- pthread_mutex_t lock;
-
- /* The reference counter for this object. */
- uatomic32_t refs;
-
- /* The state of the capability object.
-
- If STATE is _HURD_CAP_STATE_GREEN, you can use the capability
- object. Otherwise, you must wait on the condition
-     CAP_CLASS->OBJ_COND for it to return to _HURD_CAP_STATE_GREEN.
-
- If the state is _HURD_CAP_STATE_YELLOW, a thread wants the state
- to be _HURD_CAP_STATE_RED (and it has canceled all other pending
- RPC threads on this object). The last worker thread for this
- capability object (other than the thread waiting for the
- condition to become _HURD_CAP_STATE_RED) must broadcast the
- obj_state_cond condition.
-
- Every worker thread that blocks on the capability object state
- until it reverts to _HURD_CAP_STATE_GREEN must perform a
- reauthentication when it is unblocked (ie, verify that the client
- still has access to the capability object), in case the
- capability of the client for this object was revoked in the
- meantime.
-
- _HURD_CAP_STATE_BLACK is not used for capability objects. */
- _hurd_cap_state_t state;
-
- /* The pending RPC worker threads for this capability object. */
- _hurd_cap_list_item_t pending_rpcs;
-
- /* The current waiter thread. This is only valid if STATE is
- _HURD_CAP_STATE_YELLOW. Used by _hurd_cap_obj_cond_busy (). */
- pthread_t cond_waiter;
-
- /* The list items in the capability entries of the clients using
- this capability. */
- _hurd_cap_list_item_t clients;
-};
-
-
-/* Operations on capability classes. */
-
-/* Create a new capability class for objects with the size SIZE and
- alignment requirement ALIGNMENT (which must be a power of 2).
-
- The callback OBJ_INIT is used whenever a capability object in this
- class is created. The callback OBJ_REINIT is used whenever a
- capability object in this class is deallocated and returned to the
- slab. OBJ_REINIT should bring back a capability object that is not
- used anymore into the same state as OBJ_INIT does for a freshly
- allocated object. OBJ_DESTROY should deallocate all resources for
- this capablity object. Note that OBJ_REINIT can not fail: If you
- have resources that can not safely be restored into their initial
- state, you cannot use OBJ_INIT to allocate them. Furthermore, note
- that OBJ_INIT will usually be called in bursts for advanced
- allocation.
-
- The new capability class is returned in R_CLASS. If the creation
- fails, an error value will be returned. */
-error_t hurd_cap_class_create_untyped (size_t size, size_t alignment,
- hurd_cap_obj_init_t obj_init,
- hurd_cap_obj_alloc_t obj_alloc,
- hurd_cap_obj_reinit_t obj_reinit,
- hurd_cap_obj_destroy_t obj_destroy,
- hurd_cap_class_demuxer_t demuxer,
- hurd_cap_class_t *r_class);
-
-/* Define a capability class for the pointer type TYPE. */
-#define hurd_cap_class_create(type,init,alloc,reinit,destroy,demuxer,r_class) \
- hurd_cap_class_create_untyped (({ type t; sizeof (*t); }), \
- ({ type t; __alignof__ (*t); }), \
- init, alloc, reinit, destroy, demuxer, \
- r_class);
-
-
-/* Destroy the capability class CAP_CLASS and release all associated
- resources. Note that this is only allowed if there are no
- capability objects in use, and if the capability class is not used
- by a capability server. This function assumes that the class was
- created with hurd_cap_class_create. */
-error_t hurd_cap_class_free (hurd_cap_class_t cap_class);
-
-
-/* Same as hurd_cap_class_create, but doesn't allocate the storage for
- CAP_CLASS. Instead, you have to provide it. */
-error_t hurd_cap_class_init_untyped (hurd_cap_class_t cap_class,
- size_t size, size_t alignment,
- hurd_cap_obj_init_t obj_init,
- hurd_cap_obj_alloc_t obj_alloc,
- hurd_cap_obj_reinit_t obj_reinit,
- hurd_cap_obj_destroy_t obj_destroy,
- hurd_cap_class_demuxer_t demuxer);
-
-/* Define a capability class for the pointer type TYPE. */
-#define hurd_cap_class_init(cclass,type,init,alloc,reinit,destroy,demuxer) \
- hurd_cap_class_init_untyped (cclass, ({ type t; sizeof (*t); }), \
- ({ type t; __alignof__ (*t); }), \
- init, alloc, reinit, destroy, demuxer);
-
-
-/* Destroy the capability class CAP_CLASS and release all associated
- resources. Note that this is only allowed if there are no
- capability objects in use, and if the capability class is not used
- by a capability server. This function assumes that the class has
- been initialized with hurd_cap_class_init. */
-error_t hurd_cap_class_destroy (hurd_cap_class_t cap_class);
-
-
-/* Allocate a new capability object in the class CAP_CLASS. The new
- capability object is locked and has one reference. It will be
- returned in R_OBJ. If the allocation fails, an error value will be
- returned. The object will be destroyed as soon as its last
- reference is dropped. */
-error_t hurd_cap_class_alloc (hurd_cap_class_t cap_class,
- hurd_cap_obj_t *r_obj);
-
-
-/* Get the offset of the user object following a capability.
-   ALIGNMENT is the alignment requirement of the user object as
- supplied to hurd_cap_class_init, hurd_cap_class_init_untyped,
- hurd_cap_class_create or hurd_cap_class_create_untyped. */
-static inline size_t
-__attribute__((__always_inline__))
-hurd_cap_obj_user_offset (size_t alignment)
-{
- size_t offset = sizeof (struct hurd_cap_obj);
- size_t rest = sizeof (struct hurd_cap_obj) % alignment;
-
- if (rest)
- offset += alignment - rest;
-
- return offset;
-}
-
-
-/* Find the user object of the pointer type TYPE after the capability
- object OBJ. Note that in conjunction with the hurd_cap_obj_to_user
- macro below, all of this can and will be computed at compile time,
- if optimization is enabled. OBJ already fulfills the alignment
- requirement ALIGNMENT. */
-static inline void *
-__attribute__((__always_inline__))
-hurd_cap_obj_to_user_untyped (hurd_cap_obj_t obj, size_t alignment)
-{
- uintptr_t obj_addr = (uintptr_t) obj;
-
- obj_addr += hurd_cap_obj_user_offset (alignment);
-
- return (void *) obj_addr;
-}
-
-#define hurd_cap_obj_to_user(type,obj) \
- ((type) hurd_cap_obj_to_user_untyped (obj, ({ type t; __alignof__ (*t); })))
-
-
-/* Find the hurd cap object before the user object OBJ of the pointer
- type TYPE. Note that in conjunction with the hurd_cap_obj_from_user
- macro below, all of this can and will be computed at compile time,
- if optimization is enabled. OBJ already fulfills the alignment
- requirement ALIGNMENT. */
-static inline hurd_cap_obj_t
-__attribute__((__always_inline__))
-hurd_cap_obj_from_user_untyped (void *obj, size_t alignment)
-{
- uintptr_t obj_addr = (uintptr_t) obj;
-
- obj_addr -= hurd_cap_obj_user_offset (alignment);
-
- return (hurd_cap_obj_t) obj_addr;
-}
-
-#define hurd_cap_obj_from_user(type,obj) \
- hurd_cap_obj_from_user_untyped (obj, ({ type t; __alignof__ (*t); }))
-
-
-/* Inhibit all RPCs on the capability class CAP_CLASS (which must not
- be locked). You _must_ follow up with a hurd_cap_class_resume
- operation, and hold at least one reference to the object
- continuously until you did so. */
-error_t hurd_cap_class_inhibit (hurd_cap_class_t cap_class);
-
-
-/* Resume RPCs on the class CAP_CLASS and wake-up all waiters. */
-void hurd_cap_class_resume (hurd_cap_class_t cap_class);
-
-
-/* Operations on capability objects. */
-
-/* Lock the object OBJ. */
-static inline void
-hurd_cap_obj_lock (hurd_cap_obj_t obj)
-{
- pthread_mutex_lock (&obj->lock);
-}
-
-/* Unlock the object OBJ, which must be locked. */
-static inline void
-hurd_cap_obj_unlock (hurd_cap_obj_t obj)
-{
- pthread_mutex_unlock (&obj->lock);
-}
-
-
-/* Add a reference to the capability object OBJ. */
-static inline void
-hurd_cap_obj_ref (hurd_cap_obj_t obj)
-{
- atomic_increment (&obj->refs);
-}
-
-
-/* Remove one reference for the capability object OBJ, which must be
- locked. Note that the caller must have at least two references for
- this capability object when using this function. If this reference
- is potentially the last reference (i.e. the caller does not hold
- either directly or indirectly another reference to OBJ),
- hurd_cap_obj_drop must be used instead. */
-static inline void
-hurd_cap_obj_rele (hurd_cap_obj_t obj)
-{
- atomic_decrement (&obj->refs);
-}
-
-
-/* Remove one reference for the capability object OBJ, which must be
- locked, and will be unlocked when the function returns. If this
- was the last user of this object, the object is deallocated. */
-void hurd_cap_obj_drop (hurd_cap_obj_t obj);
-
-
-/* Inhibit all RPCs on the capability object OBJ (which must not be
- locked). You _must_ follow up with a hurd_cap_obj_resume
- operation, and hold at least one reference to the object
- continuously until you did so. */
-error_t hurd_cap_obj_inhibit (hurd_cap_obj_t obj);
-
-
-/* Resume RPCs on the capability object OBJ and wake-up all
- waiters. */
-void hurd_cap_obj_resume (hurd_cap_obj_t obj);
-
-
-/* Buckets are a set of capabilities, on which RPCs are managed
- collectively. */
-
-/* Create a new bucket and return it in R_BUCKET. */
-error_t hurd_cap_bucket_create (hurd_cap_bucket_t *r_bucket);
-
-
-/* Free the bucket BUCKET, which must not be used. */
-void hurd_cap_bucket_free (hurd_cap_bucket_t bucket);
-
-
-/* Copy out a capability for the capability OBJ to the client with the
- task ID TASK_ID. Returns the capability (valid only for this user)
- in *R_CAP, or an error. It is not safe to call this from outside
- an RPC on OBJ while the manager is running. */
-error_t hurd_cap_bucket_inject (hurd_cap_bucket_t bucket, hurd_cap_obj_t obj,
- hurd_task_id_t task_id,
- hurd_cap_handle_t *r_cap);
-
-
-/* If ASYNC is true, allocate worker threads asynchronously whenever
- the number of worker threads is exhausted. This is only actually
-   required for physmem (the physical memory server), to allow it to
-   break out of a deadlock between physmem and the task server.  It
- should be unnecessary for any other server.
-
-   The default is false, which means that worker threads are
- allocated synchronously by the manager thread.
-
- This function should be called before the manager is started with
- hurd_cap_bucket_manage_mt. It is only used for the multi-threaded
- RPC manager. */
-error_t hurd_cap_bucket_worker_alloc (hurd_cap_bucket_t bucket, bool async);
-
-
-/* Start managing RPCs on the bucket BUCKET. The ROOT capability
- object, which must be unlocked and have one reference throughout
- the whole time this function runs, is used for bootstrapping client
- connections. The GLOBAL_TIMEOUT parameter specifies the number of
- seconds until the manager times out (if there are no active users
- of capability objects in precious classes). The WORKER_TIMEOUT
- parameter specifies the number of seconds until each worker thread
- times out (if there are no RPCs processed by the worker thread).
-
- If this returns ECANCELED, then hurd_cap_bucket_end was called with
- the force flag being true while there were still active users. If
- this returns without any error, then the timeout expired, or
- hurd_cap_bucket_end was called without active users of capability
- objects in precious classes. */
-error_t hurd_cap_bucket_manage_mt (hurd_cap_bucket_t bucket,
- hurd_cap_obj_t root,
- unsigned int global_timeout,
- unsigned int worker_timeout);
-
-
-/* Inhibit all RPCs on the capability bucket BUCKET (which must not be
- locked). You _must_ follow up with a hurd_cap_bucket_resume (or
- hurd_cap_bucket_end) operation. */
-error_t hurd_cap_bucket_inhibit (hurd_cap_bucket_t bucket);
-
-
-/* Resume RPCs on the bucket BUCKET and wake-up all waiters. */
-void hurd_cap_bucket_resume (hurd_cap_bucket_t bucket);
-
-
-/* Exit from the server loop of the managed capability bucket BUCKET.
- This will only succeed if there are no active users, or if the
- FORCE flag is set (otherwise it will fail with EBUSY). The bucket
- must be inhibited. */
-error_t hurd_cap_bucket_end (hurd_cap_bucket_t bucket, bool force);
-
-
-/* If you want to use other capabilities in an RPC handler beside the
- one on which the RPC was invoked, you need to make sure that
- inhibition works on those other capabilities and cancel your
- operation. For this, the following interfaces are provided. */
-
-/* Forward. */
-struct hurd_cap_ctx_cap_use;
-
-/* Return the number of bytes required for a hurd_cap_ctx_cap_use
- structure. */
-size_t hurd_cap_ctx_size (void) __attribute__ ((const));
-
-/* The calling thread wishes to execute an RPC on the handle
-   HANDLE.  The calling thread must already be registered as executing
-   an RPC.  RPC_CTX is the corresponding RPC context.  The function
- uses the structure CAP_USE, which must point to the number of bytes
- returned by hurd_cap_ctx_size, to store data required by
- hurd_cap_ctx_end_cap_use. The capability object corresponding to
- HANDLE is locked and returned in *OBJP.
-
- Returns EINVAL if the capability handle is invalid for the client.
-
- Returns ENOENT if there is no object associated with handle HANDLE.
-
-   Returns EBADF if the capability is dead.
-
- Returns EDOM if the object associated with HANDLE is not in class
-   REQUIRED_CLASS.  The type check is skipped if REQUIRED_CLASS is
-   NULL. */
-error_t hurd_cap_ctx_start_cap_use (hurd_cap_rpc_context_t rpc_ctx,
- hurd_cap_handle_t handle,
- hurd_cap_class_t required_class,
- struct hurd_cap_ctx_cap_use *cap_use,
- hurd_cap_obj_t *objp);
-
-/* End the use of the object CAP_USE->OBJ, which must be locked. */
-void hurd_cap_ctx_end_cap_use (hurd_cap_rpc_context_t rpc_ctx,
- struct hurd_cap_ctx_cap_use *cap_use);
-
-#endif /* _HURD_CAP_SERVER_H */
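The declarations above cover the whole server-side lifecycle: create a class for a user object type, allocate objects, hand them out through a bucket, and run the RPC manager.  The following sketch (not part of the removed tree) shows roughly how those pieces were meant to fit together.  struct my_obj, my_server_start, the NULL callbacks, and the zero timeouts are assumptions; the demuxer is taken as a parameter because its type's signature is not reproduced in this excerpt.

#include <errno.h>
#include <hurd/cap-server.h>

/* Hypothetical user object stored behind each capability object.  */
struct my_obj
{
  int value;
};

/* Hypothetical bootstrap: create a class, a root object and a bucket,
   then serve RPCs until the manager returns.  */
static error_t
my_server_start (hurd_cap_class_demuxer_t demuxer)
{
  error_t err;
  hurd_cap_class_t my_class;
  hurd_cap_bucket_t bucket;
  hurd_cap_obj_t root;

  /* The init/alloc/reinit/destroy callbacks are assumed to be optional
     (class-init.c and class-alloc.c below check them against NULL).  */
  err = hurd_cap_class_create (struct my_obj *, NULL, NULL, NULL, NULL,
                               demuxer, &my_class);
  if (err)
    return err;

  /* The new object comes back locked and with one reference.  */
  err = hurd_cap_class_alloc (my_class, &root);
  if (err)
    return err;
  hurd_cap_obj_to_user (struct my_obj *, root)->value = 0;
  hurd_cap_obj_unlock (root);

  err = hurd_cap_bucket_create (&bucket);
  if (err)
    return err;

  /* Zero timeouts are a placeholder; the excerpt does not document a
     "never time out" value.  ROOT is used to bootstrap clients.  */
  return hurd_cap_bucket_manage_mt (bucket, root, 0, 0);
}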
diff --git a/libhurd-cap-server/class-alloc.c b/libhurd-cap-server/class-alloc.c
deleted file mode 100644
index b57ca97..0000000
--- a/libhurd-cap-server/class-alloc.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/* class-alloc.c - Allocate a capability object.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-
-#include <hurd/slab.h>
-#include <hurd/cap-server.h>
-
-
-/* Allocate a new capability object in the class CAP_CLASS. The new
- capability object is locked and has one reference. It will be
- returned in R_OBJ. If the allocation fails, an error value will be
- returned. */
-error_t
-hurd_cap_class_alloc (hurd_cap_class_t cap_class, hurd_cap_obj_t *r_obj)
-{
- error_t err;
- void *new_obj;
- hurd_cap_obj_t obj;
-
- err = hurd_slab_alloc (&cap_class->obj_space, &new_obj);
- if (err)
- return err;
- obj = new_obj;
-
- /* Let the user do their extra initialization. */
- if (cap_class->obj_alloc)
- {
- err = (*cap_class->obj_alloc) (cap_class, obj);
- if (err)
- {
- hurd_slab_dealloc (&cap_class->obj_space, obj);
- return err;
- }
- }
-
- /* Now take the lock. */
- hurd_cap_obj_lock (obj);
-
- *r_obj = obj;
- return 0;
-}
diff --git a/libhurd-cap-server/class-create.c b/libhurd-cap-server/class-create.c
deleted file mode 100644
index 2f4c16a..0000000
--- a/libhurd-cap-server/class-create.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/* class-create.c - Create a capability class.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-
-#include <hurd/cap-server.h>
-
-
-/* Create a new capability class for objects allocating SIZE bytes for
- the user object with alignment ALIGNMENT (i.e. size does NOT
- include the struct hurd_cap_obj which is placed at the beginning of
- each capability object).
-
- The callback OBJ_INIT is used whenever a capability object in this
- class is created. The callback OBJ_REINIT is used whenever a
- capability object in this class is deallocated and returned to the
-   slab.  OBJ_REINIT should bring a capability object that is no
-   longer used back into the same state as OBJ_INIT does for a freshly
- allocated object. OBJ_DESTROY should deallocate all resources for
-   this capability object.  Note that if OBJ_INIT or OBJ_REINIT fails,
- the object is considered to be fully destroyed. No extra call to
- OBJ_DESTROY will be made for such objects.
-
- The new capability class is returned in R_CLASS. If the creation
- fails, an error value will be returned. */
-error_t
-hurd_cap_class_create_untyped (size_t size, size_t alignment,
- hurd_cap_obj_init_t obj_init,
- hurd_cap_obj_alloc_t obj_alloc,
- hurd_cap_obj_reinit_t obj_reinit,
- hurd_cap_obj_destroy_t obj_destroy,
- hurd_cap_class_demuxer_t demuxer,
- hurd_cap_class_t *r_class)
-{
- error_t err;
- hurd_cap_class_t cap_class = malloc (sizeof (struct hurd_cap_class));
-
- if (!cap_class)
- return errno;
-
- err = hurd_cap_class_init_untyped (cap_class, size, alignment, obj_init,
- obj_alloc, obj_reinit, obj_destroy,
- demuxer);
- if (err)
- {
- free (cap_class);
- return err;
- }
-
- *r_class = cap_class;
- return 0;
-}
diff --git a/libhurd-cap-server/class-destroy.c b/libhurd-cap-server/class-destroy.c
deleted file mode 100644
index 116d99a..0000000
--- a/libhurd-cap-server/class-destroy.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/* class-destroy.c - Destroy a capability class.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-
-#include <hurd/cap-server.h>
-
-
-/* Destroy the capability class CAP_CLASS and release all associated
- resources. Note that this is only allowed if there are no
- capability objects in use, and if the capability class is not used
- by a capability server. This function assumes that the class has
- been initialized with hurd_cap_class_init. */
-error_t
-hurd_cap_class_destroy (hurd_cap_class_t cap_class)
-{
- error_t err = 0;
-
- /* FIXME: This function needs to be revised. We need to take the
-     locks, if only for memory synchronization.  */
-
- /* This will fail if there are still allocated capability
- objects. */
- err = hurd_slab_destroy (&cap_class->obj_space);
- if (err)
- return err;
-
- /* At this point, destruction will succeed. */
- pthread_cond_destroy (&cap_class->cond);
- pthread_mutex_destroy (&cap_class->lock);
- pthread_mutex_destroy (&cap_class->obj_cond_lock);
- pthread_cond_destroy (&cap_class->obj_cond);
-
- return 0;
-}
diff --git a/libhurd-cap-server/class-free.c b/libhurd-cap-server/class-free.c
deleted file mode 100644
index 6c5c0be..0000000
--- a/libhurd-cap-server/class-free.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* class-free.c - Free a capability class.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-
-#include <hurd/cap-server.h>
-
-
-/* Destroy the capability class CAP_CLASS and release all associated
- resources. Note that this is only allowed if there are no
- capability objects in use, and if the capability class is not used
- by a capability server. This function assumes that the class was
- created with hurd_cap_class_create. */
-error_t
-hurd_cap_class_free (hurd_cap_class_t cap_class)
-{
- error_t err;
-
- err = hurd_cap_class_destroy (cap_class);
- if (err)
- return err;
-
- free (cap_class);
- return 0;
-}
diff --git a/libhurd-cap-server/class-inhibit.c b/libhurd-cap-server/class-inhibit.c
deleted file mode 100644
index ea2a663..0000000
--- a/libhurd-cap-server/class-inhibit.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/* class-inhibit.c - Inhibit RPCs on a capability class.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Inhibit all RPCs on the capability class CAP_CLASS (which must not
- be locked). You _must_ follow up with a hurd_cap_class_resume
- operation, and hold at least one reference to the object
- continuously until you did so. */
-error_t
-hurd_cap_class_inhibit (hurd_cap_class_t cap_class)
-{
- error_t err;
-
- pthread_mutex_lock (&cap_class->lock);
-
- /* First wait until any other inhibitor has resumed the class. If
- this function is called within an RPC, we are going to be
- canceled anyway. Otherwise, it ensures that class inhibitions
- are fully serialized (per class). */
- while (cap_class->state != _HURD_CAP_STATE_GREEN)
- {
- err = hurd_cond_wait (&cap_class->cond, &cap_class->lock);
- if (err)
- {
- /* We have been canceled. */
- pthread_mutex_unlock (&cap_class->lock);
- return err;
- }
- }
-
- /* Now it is our turn to inhibit the class. */
- cap_class->cond_waiter = pthread_self ();
-
- if (_hurd_cap_class_cond_busy (cap_class))
- {
- _hurd_cap_list_item_t pending_rpc = cap_class->pending_rpcs;
-
- /* There are still pending RPCs (beside us). Cancel them. */
- while (pending_rpc)
- {
- if (pending_rpc->thread != cap_class->cond_waiter)
- pthread_cancel (pending_rpc->thread);
- pending_rpc = pending_rpc->next;
- }
-
- /* Indicate that we would like to know when they have gone. */
- cap_class->state = _HURD_CAP_STATE_YELLOW;
-
- /* The last one will shut the door. */
- do
- {
- err = hurd_cond_wait (&cap_class->cond, &cap_class->lock);
- if (err)
- {
- /* We have been canceled ourselves. Give up. */
- cap_class->state = _HURD_CAP_STATE_GREEN;
- pthread_mutex_unlock (&cap_class->lock);
- return err;
- }
- }
- while (cap_class->state != _HURD_CAP_STATE_RED);
- }
- else
- cap_class->state = _HURD_CAP_STATE_RED;
-
- /* Now all pending RPCs have been canceled and are completed (except
- us), and all incoming RPCs are inhibited. */
- pthread_mutex_unlock (&cap_class->lock);
-
- return 0;
-}
-
-
-/* Resume RPCs on the class CAP_CLASS and wake-up all waiters. */
-void
-hurd_cap_class_resume (hurd_cap_class_t cap_class)
-{
- pthread_mutex_lock (&cap_class->lock);
-
- cap_class->state = _HURD_CAP_STATE_GREEN;
-
- /* Broadcast the change to all potential waiters. */
- pthread_cond_broadcast (&cap_class->cond);
-
- pthread_mutex_unlock (&cap_class->lock);
-}
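The two functions above form a mandatory pair: every successful hurd_cap_class_inhibit must be followed by hurd_cap_class_resume, with a reference held in between.  A minimal, hypothetical usage sketch (with_class_inhibited and OPERATION are invented for illustration, not code from the removed tree):

#include <errno.h>
#include <hurd/cap-server.h>

/* Run OPERATION while all RPCs on CAP_CLASS are blocked out.  The
   caller is assumed to hold a reference keeping CAP_CLASS alive.  */
static error_t
with_class_inhibited (hurd_cap_class_t cap_class, void (*operation) (void))
{
  error_t err;

  err = hurd_cap_class_inhibit (cap_class);
  if (err)
    /* We were canceled while waiting for the class to quiesce.  */
    return err;

  (*operation) ();

  /* Mandatory follow-up: resume RPCs and wake up all waiters.  */
  hurd_cap_class_resume (cap_class);
  return 0;
}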
diff --git a/libhurd-cap-server/class-init.c b/libhurd-cap-server/class-init.c
deleted file mode 100644
index 150fa96..0000000
--- a/libhurd-cap-server/class-init.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/* class-init.c - Initialize a capability class.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <pthread.h>
-
-#include <hurd/slab.h>
-#include <hurd/cap-server.h>
-
-
-/* Initialize the slab object pointed to by BUFFER. HOOK is as
- provided to hurd_slab_create. */
-static error_t
-_hurd_cap_obj_constructor (void *hook, void *buffer)
-{
- hurd_cap_class_t cap_class = (hurd_cap_class_t) hook;
- hurd_cap_obj_t obj = (hurd_cap_obj_t) buffer;
- error_t err;
-
- /* First do our own initialization. */
- obj->cap_class = cap_class;
-
- err = pthread_mutex_init (&obj->lock, 0);
- if (err)
- return err;
-
- obj->refs = 1;
- obj->state = _HURD_CAP_STATE_GREEN;
- obj->pending_rpcs = NULL;
- /* The member COND_WAITER does not need to be initialized. */
- obj->clients = NULL;
-
- /* Then do the user part, if necessary. */
- if (cap_class->obj_init)
- {
- err = (*cap_class->obj_init) (cap_class, obj);
- if (err)
- {
- pthread_mutex_destroy (&obj->lock);
- return err;
- }
- }
-
- return 0;
-}
-
-
-/* Destroy the slab object pointed to by BUFFER. HOOK is as provided
- to hurd_slab_create. */
-static void
-_hurd_cap_obj_destructor (void *hook, void *buffer)
-{
- hurd_cap_class_t cap_class = (hurd_cap_class_t) hook;
- hurd_cap_obj_t obj = (hurd_cap_obj_t) buffer;
-
- if (cap_class->obj_destroy)
- (*cap_class->obj_destroy) (cap_class, obj);
-
- pthread_mutex_destroy (&obj->lock);
-}
-
-
-/* Same as hurd_cap_class_create, but doesn't allocate the storage for
- CAP_CLASS. Instead, you have to provide it. */
-error_t
-hurd_cap_class_init_untyped (hurd_cap_class_t cap_class,
- size_t size, size_t alignment,
- hurd_cap_obj_init_t obj_init,
- hurd_cap_obj_alloc_t obj_alloc,
- hurd_cap_obj_reinit_t obj_reinit,
- hurd_cap_obj_destroy_t obj_destroy,
- hurd_cap_class_demuxer_t demuxer)
-{
- error_t err;
-
- /* The alignment requirements must be a power of 2. */
- assert ((alignment & (alignment - 1)) == 0
- || ! "hurd_cap_class_init_untyped: "
- "requested alignment not a power of 2");
-
- /* Find the smallest alignment requirement common to the user object
- and a struct hurd_cap_obj. Since both are required to be a power
-     of 2, we simply take the larger one.  */
- if (alignment < __alignof__(struct hurd_cap_obj))
- alignment = __alignof__(struct hurd_cap_obj);
-
- size += hurd_cap_obj_user_offset (alignment);
-
- /* Capability object management. */
-
- cap_class->obj_init = obj_init;
- cap_class->obj_alloc = obj_alloc;
- cap_class->obj_reinit = obj_reinit;
- cap_class->obj_destroy = obj_destroy;
-
- err = hurd_slab_init (&cap_class->obj_space, size, alignment, NULL, NULL,
- _hurd_cap_obj_constructor, _hurd_cap_obj_destructor,
- cap_class);
- if (err)
- goto err_obj_space;
-
- err = pthread_cond_init (&cap_class->obj_cond, NULL);
- if (err)
- goto err_obj_cond;
-
- err = pthread_mutex_init (&cap_class->obj_cond_lock, NULL);
- if (err)
- goto err_obj_cond_lock;
-
-
- /* Class management. */
-
- cap_class->demuxer = demuxer;
-
- err = pthread_mutex_init (&cap_class->lock, NULL);
- if (err)
- goto err_lock;
-
- cap_class->state = _HURD_CAP_STATE_GREEN;
-
- err = pthread_cond_init (&cap_class->cond, NULL);
- if (err)
- goto err_cond;
-
- /* The cond_waiter member doesn't need to be initialized. It is
- only valid when CAP_CLASS->STATE is _HURD_CAP_STATE_YELLOW. */
-
- cap_class->pending_rpcs = NULL;
-
- /* FIXME: Add the class to the list of classes to be served by
- RPCs. */
-
- return 0;
-
- /* This is provided here in case you add more initialization to the
- end of the above code. */
-#if 0
- pthread_cond_destroy (&cap_class->cond);
-#endif
-
- err_cond:
- pthread_mutex_destroy (&cap_class->lock);
-
- err_lock:
- pthread_mutex_destroy (&cap_class->obj_cond_lock);
-
- err_obj_cond_lock:
- pthread_cond_destroy (&cap_class->obj_cond);
-
- err_obj_cond:
- /* This can not fail at this point. */
- hurd_slab_destroy (&cap_class->obj_space);
-
- err_obj_space:
- return err;
-}
diff --git a/libhurd-cap-server/client-create.c b/libhurd-cap-server/client-create.c
deleted file mode 100644
index ac8c54a..0000000
--- a/libhurd-cap-server/client-create.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* client-create.c - Create a capability client.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <assert.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Client management code. */
-
-/* Allocate a new capability client structure for the slab cache. */
-static error_t
-_hurd_cap_client_constructor (void *hook, void *buffer)
-{
- _hurd_cap_client_t client = (_hurd_cap_client_t) buffer;
- error_t err;
-
- err = pthread_mutex_init (&client->lock, NULL);
- if (err)
- return err;
-
- client->state = _HURD_CAP_STATE_GREEN;
- client->pending_rpcs = NULL;
-
- /* Capabilities are mapped to clients many to many, so we can not
- use a location pointer. However, this is not critical as
- removing an entry only blocks out RPCs for the same client, and
- not others. */
- hurd_ihash_init (&client->caps_reverse, HURD_IHASH_NO_LOCP);
-
- return 0;
-
- /* This is provided here in case you add more initialization to the
- end of the above code. */
-#if 0
- pthread_mutex_destroy (&client->lock);
-
- return err;
-#endif
-}
-
-
-/* Allocate a new capability client structure for the slab cache. */
-static void
-_hurd_cap_client_destructor (void *hook, void *buffer)
-{
- _hurd_cap_client_t client = (_hurd_cap_client_t) buffer;
-
- hurd_ihash_destroy (&client->caps_reverse);
- hurd_table_destroy (&client->caps);
- pthread_mutex_destroy (&client->lock);
-}
-
-
-/* The global slab for all capability clients. */
-struct hurd_slab_space _hurd_cap_client_space
- = HURD_SLAB_SPACE_INITIALIZER (struct _hurd_cap_client, NULL, NULL,
- _hurd_cap_client_constructor,
- _hurd_cap_client_destructor, NULL);
-
-
-static error_t
-_hurd_cap_client_alloc (hurd_task_id_t task_id,
- _hurd_cap_client_t *r_client)
-{
- error_t err;
- void *new_client;
- _hurd_cap_client_t client;
-
- err = hurd_slab_alloc (&_hurd_cap_client_space, &new_client);
- if (err)
- return err;
-
- client = new_client;
-
- /* CLIENT->id will be initialized by the caller when adding the
- client to the client table of the class. */
- client->task_id = task_id;
-
- err = hurd_table_init (&client->caps, sizeof (_hurd_cap_obj_entry_t));
- if (err)
- {
- free (client);
- return err;
- }
-
- /* FIXME: We need to acquire a task info capability here. The task
- death notifications have been suspended by the caller. */
-
- *r_client = client;
- return 0;
-}
-
-
-/* Look up the client with the task ID TASK in the bucket BUCKET, and
- return it in R_CLIENT, with one additional reference. If it is not
- found, create it. */
-error_t
-__attribute__((visibility("hidden")))
-_hurd_cap_client_create (hurd_cap_bucket_t bucket,
- hurd_task_id_t task_id,
- _hurd_cap_client_t *r_client)
-{
- error_t err = 0;
- _hurd_cap_client_t client;
-
- pthread_mutex_lock (&bucket->lock);
- client = (_hurd_cap_client_t) hurd_ihash_find (&bucket->clients_reverse,
- task_id);
- if (client)
- {
- if (client->dead)
- err = EINVAL; /* FIXME: A more appropriate code? */
- else
- {
- client->refs++;
- *r_client = client;
- }
- pthread_mutex_unlock (&bucket->lock);
- return err;
- }
- pthread_mutex_unlock (&bucket->lock);
-
- /* The client is not yet registered. Block out processing task
- death notifications, create a new client structure, and then
- enter it into the table before resuming task death
- notifications. */
- hurd_task_death_notify_suspend ();
- err = _hurd_cap_client_alloc (task_id, r_client);
- if (err)
- {
- hurd_task_death_notify_resume ();
- return err;
- }
-
- pthread_mutex_lock (&bucket->lock);
- /* Since we dropped the bucket lock during the allocation (which is
- potentially long) we need to check that somebody else didn't
- already allocate a client data structure. If so, we can just use
- that. Otherwise, we continue. */
- client = (_hurd_cap_client_t) hurd_ihash_find (&bucket->clients_reverse,
- task_id);
- if (client)
- {
- if (client->dead)
- {
- err = EINVAL; /* FIXME: A more appropriate code? */
- pthread_mutex_unlock (&bucket->lock);
- }
- else
- {
- /* Somebody else was indeed faster. Use the existing entry. */
- client->refs++;
- pthread_mutex_unlock (&bucket->lock);
- _hurd_cap_client_dealloc (bucket, *r_client);
- *r_client = client;
- }
- return err;
- }
-
- client = *r_client;
-
- /* One reference for the fact that the client task lives, one for
- the caller. */
- client->refs = 2;
-
- err = hurd_table_enter (&bucket->clients, &client, &client->id);
- if (!err)
- {
- err = hurd_ihash_add (&bucket->clients_reverse, task_id, client);
- if (err)
- hurd_table_remove (&bucket->clients, client->id);
- }
- if (err)
- {
- pthread_mutex_unlock (&bucket->lock);
- hurd_task_death_notify_resume ();
-
- _hurd_cap_client_dealloc (bucket, client);
- return err;
- }
- pthread_mutex_unlock (&bucket->lock);
- hurd_task_death_notify_resume ();
-
- *r_client = client;
-
- return 0;
-}
diff --git a/libhurd-cap-server/client-inhibit.c b/libhurd-cap-server/client-inhibit.c
deleted file mode 100644
index 2228b5f..0000000
--- a/libhurd-cap-server/client-inhibit.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/* client-inhibit.c - Inhibit RPCs on a capability client.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Inhibit all RPCs on the capability client CLIENT (which must not be
- locked) in the capability bucket BUCKET. You _must_ follow up
- with a hurd_cap_client_resume operation, and hold at least one
- reference to the object continuously until you did so. */
-error_t
-_hurd_cap_client_inhibit (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
-{
- error_t err;
-
- /* First take the bucket-wide lock for conditions on capability
- client states. */
- pthread_mutex_lock (&bucket->client_cond_lock);
-
- /* Then lock the client to check its state. */
- pthread_mutex_lock (&client->lock);
-
- /* First wait until any other inhibitor has resumed the capability
- client. This ensures that capability client inhibitions are
- fully serialized (per capability client). */
- while (client->state != _HURD_CAP_STATE_GREEN)
- {
- pthread_mutex_unlock (&client->lock);
- err = hurd_cond_wait (&bucket->client_cond,
- &bucket->client_cond_lock);
- if (err)
- {
- /* We have been canceled. */
- pthread_mutex_unlock (&bucket->client_cond_lock);
- return err;
- }
- pthread_mutex_lock (&client->lock);
- }
-
- /* Now it is our turn to inhibit the capability client. */
- client->cond_waiter = pthread_self ();
-
- if (_hurd_cap_client_cond_busy (client))
- {
- _hurd_cap_list_item_t pending_rpc = client->pending_rpcs;
-
- /* There are still pending RPCs (beside us). Cancel them. */
- while (pending_rpc)
- {
- if (pending_rpc->thread != client->cond_waiter)
- pthread_cancel (pending_rpc->thread);
- pending_rpc = pending_rpc->next;
- }
-
- /* Indicate that we would like to know when they have gone. */
- client->state = _HURD_CAP_STATE_YELLOW;
-
- /* The last one will shut the door. */
- do
- {
- pthread_mutex_unlock (&client->lock);
- err = hurd_cond_wait (&bucket->client_cond,
- &bucket->client_cond_lock);
- if (err)
- {
- /* We have been canceled ourselves. Give up. */
- client->state = _HURD_CAP_STATE_GREEN;
- pthread_mutex_unlock (&bucket->client_cond_lock);
- return err;
- }
- pthread_mutex_lock (&client->lock);
- }
- while (client->state != _HURD_CAP_STATE_RED);
- }
- else
- client->state = _HURD_CAP_STATE_RED;
-
- /* Now all pending RPCs have been canceled and are completed (except
- us), and all incoming RPCs are inhibited. */
- pthread_mutex_unlock (&client->lock);
- pthread_mutex_unlock (&bucket->client_cond_lock);
-
- return 0;
-}
-
-
-/* Resume RPCs on the capability client CLIENT in the bucket BUCKET
- and wake-up all waiters. */
-void
-_hurd_cap_client_resume (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
-{
- pthread_mutex_lock (&bucket->client_cond_lock);
- pthread_mutex_lock (&client->lock);
-
- client->state = _HURD_CAP_STATE_GREEN;
-
- /* Broadcast the change to all potential waiters. */
- pthread_cond_broadcast (&bucket->client_cond);
-
- pthread_mutex_unlock (&client->lock);
- pthread_mutex_unlock (&bucket->client_cond_lock);
-}
-
-
-/* End RPCs on the capability client CLIENT in the bucket BUCKET and
- wake-up all waiters. */
-void
-_hurd_cap_client_end (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
-{
- pthread_mutex_lock (&bucket->client_cond_lock);
- pthread_mutex_lock (&bucket->lock);
-
- client->state = _HURD_CAP_STATE_BLACK;
-
- /* Broadcast the change to all potential waiters. Even though the
- task is dead now, there is a race condition where we will process
- one spurious incoming RPC which is blocked on the inhibited
-     state.  So we wake up such threads; they will then go away
- quickly.
-
- Note that this does not work reliably for still living clients:
- They may bombard us with RPCs and thus keep the reference count
- of the client in the bucket table above 0 all the time, even in
- the _HURD_CAP_STATE_BLACK state. This is the reason that this
- interface is only for internal use (by
- _hurd_cap_client_death). */
- pthread_cond_broadcast (&bucket->client_cond);
-
- pthread_mutex_unlock (&bucket->lock);
- pthread_mutex_unlock (&bucket->client_cond_lock);
-}
diff --git a/libhurd-cap-server/client-release.c b/libhurd-cap-server/client-release.c
deleted file mode 100644
index 57d3796..0000000
--- a/libhurd-cap-server/client-release.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/* client-release.c - Release a capability client.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include <compiler.h>
-
-#include "cap-server-intern.h"
-
-
-/* Deallocate the connection client CLIENT. */
-void
-_hurd_cap_client_dealloc (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
-{
- unsigned int nr_caps = 0;
-
- /* This function is only invoked if the reference count for the
- client entry in the client table of the class drops to 0, and
- after the table entry was removed. Usually, the last reference
- is removed by the task death notification handler.
-
- At that point, there are no more pending RPCs for this client (if
- there were, there would be a reference for each of them). This
- also means that all capability IDs have at most one internal
- reference, the one for all external references. */
-
- /* Note that although the client has been removed from the clients
- table in the class, there are still back-references for each and
- every capability object in our capability table caps. These
- capability entries all count as references to ourself. They are
- used for example if a capability is revoked. It is important to
- realize that such a revocation can happen anytime as long as
- there are still valid capability objects in the caps table of the
- client.
-
- So, to correctly release those references, we have to look up
- each capability object properly, acquiring our own internal
- reference for it, then we have to unlock the client to lock the
- capability object, to finally revoke our own capability and
- release the capability object reference. Only then can we
- reacquire our own lock and go on to the next capability. While
- we do not hold our lock, more capabilities can be revoked by
- other concurrent operations. However, no new capabilities are
- added, so one pass through the table is enough. */
-
- pthread_mutex_lock (&client->lock);
-
- /* Release all capability objects held by this user. Because we
- have to honor the locking order, this takes a while. */
- HURD_TABLE_ITERATE (&client->caps, idx)
- {
- _hurd_cap_obj_entry_t entry;
-
- entry = *((_hurd_cap_obj_entry_t *)
- HURD_TABLE_LOOKUP (&client->caps, idx));
-
- /* If there were no external references, the last internal
- reference would have been released before we get here. */
- assert (entry->external_refs);
-
- nr_caps++;
-
- /* The number of internal references is either one or zero. If
- it is one, then the capability is not revoked yet, so we have
- to do it. If it is zero, then the capability is revoked
- (dead), and we only have to clear the table entry. */
- if (!entry->dead)
- {
- hurd_cap_obj_t cap_obj = entry->cap_obj;
-
- assert (entry->internal_refs == 1);
-
- /* Acquire an internal reference to prevent that our own
- reference to the capability object is removed by a
- concurrent revocation as soon as we unlock the client.
- After all, the existing internal reference belongs to the
- capability object, and not to us. */
- entry->internal_refs++;
- pthread_mutex_unlock (&client->lock);
-
- pthread_mutex_lock (&cap_obj->lock);
- /* Check if we should revoke it, or if somebody else did already. */
- if (!entry->dead)
- {
- int found;
-
- /* We should revoke it. */
- pthread_mutex_lock (&client->lock);
- found = hurd_ihash_remove (&client->caps_reverse,
- (hurd_ihash_key_t) cap_obj);
- assert (found);
- entry->dead = 1;
-
- assert (entry->internal_refs == 2);
- entry->internal_refs--;
- pthread_mutex_unlock (&client->lock);
-
- /* FIXME: Remove it from the capabilities client list. */
- }
- pthread_mutex_unlock (&cap_obj->lock);
-
- pthread_mutex_lock (&client->lock);
- /* Now we can drop the capability object below. */
- assert (entry->dead);
- assert (entry->internal_refs == 1);
- assert (entry->external_refs);
- }
- else
- {
- /* If the capability is dead, we can simply drop it below. */
- assert (entry->internal_refs == 0);
- entry->internal_refs = 1;
- }
-
- entry->dead = 0;
- /* ENTRY->internal_refs is 1. */
- entry->external_refs = 1;
-
- /* Remove the entry. */
- hurd_slab_dealloc (&_hurd_cap_obj_entry_space, entry);
- hurd_table_remove (&client->caps, idx);
- }
-
- /* After all this ugly work, the rest is trivial. */
- if (client->state != _HURD_CAP_STATE_GREEN)
- client->state = _HURD_CAP_STATE_GREEN;
-
- assert (client->pending_rpcs == NULL);
-
- /* FIXME: Release the task info capability here. */
-
- /* FIXME: It would be a good idea to shrink the empty table and
- empty hash here, to reclaim resources and be able to eventually
- enforce a per-client quota. */
- pthread_mutex_unlock (&client->lock);
-
- pthread_mutex_lock (&bucket->lock);
- bucket->nr_caps -= nr_caps;
- pthread_mutex_unlock (&bucket->lock);
-
- hurd_slab_dealloc (&_hurd_cap_client_space, client);
-}
-
-
-/* Release a reference for the client with the ID IDX in bucket
- BUCKET. */
-void
-_hurd_cap_client_release (hurd_cap_bucket_t bucket, _hurd_cap_client_id_t idx)
-{
- _hurd_cap_client_t client;
-
- pthread_mutex_lock (&bucket->lock);
- client = *(_hurd_cap_client_t *) HURD_TABLE_LOOKUP (&bucket->clients, idx);
-
- if (EXPECT_TRUE (client->refs > 1))
- {
- client->refs--;
- pthread_mutex_unlock (&bucket->lock);
- }
- else
- {
- hurd_table_remove (&bucket->clients, idx);
- hurd_ihash_locp_remove (&bucket->clients_reverse, client->locp);
-
- pthread_mutex_unlock (&bucket->lock);
- _hurd_cap_client_dealloc (bucket, client);
- }
-}
diff --git a/libhurd-cap-server/ctx-cap-use.c b/libhurd-cap-server/ctx-cap-use.c
deleted file mode 100644
index 3758c19..0000000
--- a/libhurd-cap-server/ctx-cap-use.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/* ctx-cap-use.c - Use capabilities within an RPC context.
- Copyright (C) 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <stdlib.h>
-
-#include <hurd/cap-server.h>
-
-#include <compiler.h>
-
-#include "cap-server-intern.h"
-
-
-/* Return the number of bytes required for a hurd_cap_ctx_cap_use
- structure. */
-size_t
-hurd_cap_ctx_size (void)
-{
- return sizeof (struct hurd_cap_ctx_cap_use);
-}
-
-/* If you want to use other capabilities in an RPC handler beside the
- one on which the RPC was invoked, you need to make sure that
- inhibition works on those other capabilities and cancel your
- operation. For this, the following interfaces are provided. */
-
-/* The calling thread wishes to execute an RPC on the handle
-   HANDLE.  The calling thread must already be registered as executing
-   an RPC.  RPC_CTX is the corresponding RPC context.  The function
- uses the structure CAP_USE, which must point to the number of bytes
- returned by hurd_cap_ctx_size, to store data required by
- hurd_cap_ctx_end_cap_use. The capability object corresponding to
- HANDLE is locked and returned in *OBJP.
-
- Returns EINVAL if the capability handle is invalid for the client.
-
- Returns ENOENT if there is no object associated with handle HANDLE.
-
-   Returns EBADF if the capability is dead.
-
- Returns EDOM if the object associated with HANDLE is not in class
-   REQUIRED_CLASS.  The type check is skipped if REQUIRED_CLASS is
-   NULL. */
-error_t
-hurd_cap_ctx_start_cap_use (hurd_cap_rpc_context_t rpc_ctx,
- hurd_cap_handle_t handle,
- hurd_cap_class_t required_class,
- struct hurd_cap_ctx_cap_use *cap_use,
- hurd_cap_obj_t *objp)
-{
- error_t err = 0;
- hurd_cap_bucket_t bucket = rpc_ctx->bucket;
- _hurd_cap_client_t client = rpc_ctx->client;
- hurd_cap_obj_t obj;
- hurd_cap_class_t cap_class;
- _hurd_cap_obj_entry_t obj_entry;
- _hurd_cap_obj_entry_t *obj_entryp;
-
-
- /* HANDLE must belong to the same client as RPC_CTX->HANDLE. */
- if (_hurd_cap_client_id (handle) != _hurd_cap_client_id (rpc_ctx->handle))
- return EINVAL;
-
- pthread_mutex_lock (&client->lock);
-
- /* Look up the object. */
- obj_entryp = (_hurd_cap_obj_entry_t *)
- hurd_table_lookup (&client->caps, _hurd_cap_id (handle));
- if (!obj_entryp)
- err = ENOENT;
- else
- {
- cap_use->_obj_entry = obj_entry = *obj_entryp;
-
- if (EXPECT_FALSE (!obj_entry->external_refs))
- err = ENOENT;
- else if (EXPECT_FALSE (obj_entry->dead))
- err = EBADF;
- else
- {
- obj_entry->internal_refs++;
- *objp = obj = obj_entry->cap_obj;
- }
- }
- pthread_mutex_unlock (&client->lock);
-
- if (err)
- /* Either the capability ID is invalid, or it was revoked. */
- return err;
-
- /* If HANDLE and RPC_CTX->HANDLE are the same, we are done. */
- if (EXPECT_FALSE (_hurd_cap_id (handle) == _hurd_cap_id (rpc_ctx->handle)))
- {
- assert (obj == rpc_ctx->obj);
- return 0;
- }
-
- /* At this point, CAP and OBJ are valid and we have one internal
- reference to the capability entry. */
-
- cap_class = obj->cap_class;
-
- if (required_class && cap_class != required_class)
- {
- err = EINVAL;
- goto client_cleanup;
- }
-
- if (cap_class != rpc_ctx->obj->cap_class)
-    /* The capability class is not the same as the first cap's.  We
-       need to add ourselves to the cap class pending RPC list.  */
- {
- pthread_mutex_lock (&cap_class->lock);
- /* First, we have to check if the class is inhibited, and if it is,
- we have to wait until it is uninhibited. */
- while (!err && cap_class->state != _HURD_CAP_STATE_GREEN)
- err = hurd_cond_wait (&cap_class->cond, &cap_class->lock);
- if (err)
- {
- /* Canceled. */
- pthread_mutex_unlock (&cap_class->lock);
- goto client_cleanup;
- }
-
-      /* Now add ourselves to the pending RPC list of the class.  */
- cap_use->_worker_class.thread = pthread_self ();
- cap_use->_worker_class.tid = l4_myself ();
- _hurd_cap_list_item_add (&cap_class->pending_rpcs,
- &cap_use->_worker_class);
-
- pthread_mutex_unlock (&cap_class->lock);
- }
-
- pthread_mutex_lock (&obj->lock);
- /* First, we have to check if the object is inhibited, and if it is,
- we have to wait until it is uninhibited. */
- if (obj->state != _HURD_CAP_STATE_GREEN)
- {
- pthread_mutex_unlock (&obj->lock);
- pthread_mutex_lock (&cap_class->obj_cond_lock);
- pthread_mutex_lock (&obj->lock);
- while (!err && obj->state != _HURD_CAP_STATE_GREEN)
- {
- pthread_mutex_unlock (&obj->lock);
- err = hurd_cond_wait (&cap_class->obj_cond,
- &cap_class->obj_cond_lock);
- pthread_mutex_lock (&obj->lock);
- }
- pthread_mutex_unlock (&cap_class->obj_cond_lock);
- }
- if (err)
- {
- /* Canceled. */
- pthread_mutex_unlock (&obj->lock);
- goto class_cleanup;
- }
-
- /* Now check if the client still has the capability, or if it was
- revoked. */
- pthread_mutex_lock (&client->lock);
- if (obj_entry->dead)
- err = EBADF;
- pthread_mutex_unlock (&client->lock);
- if (err)
- {
- /* The capability was revoked in the meantime. */
- pthread_mutex_unlock (&obj->lock);
- goto class_cleanup;
- }
-
- cap_use->_worker_obj.thread = pthread_self ();
- cap_use->_worker_obj.tid = l4_myself ();
-  _hurd_cap_list_item_add (&obj->pending_rpcs, &cap_use->_worker_obj);
-
- /* At this point, we have looked up the capability, acquired an
- internal reference for its entry in the client table (which
-     implicitly keeps a reference acquired for the object itself),
- acquired a reference for the capability client in the bucket, and
- have added an item to the pending_rpcs lists in the class (if
- necessary) and object. The object is locked. */
-
- return 0;
-
- class_cleanup:
- if (cap_use->_obj_entry->cap_obj->cap_class != rpc_ctx->obj->cap_class)
- /* Different classes. */
- {
- pthread_mutex_lock (&cap_class->lock);
- _hurd_cap_list_item_remove (&cap_use->_worker_class);
- _hurd_cap_class_cond_check (cap_class);
- pthread_mutex_unlock (&cap_class->lock);
- }
-
- client_cleanup:
- pthread_mutex_lock (&client->lock);
-
- /* You are not allowed to revoke a capability while there are
- pending RPCs on it. This is the reason we know that there must
- be at least one extra internal reference. FIXME: For
- cleanliness, this could still call some inline function that does
- the decrement. The assert can be a hint to the compiler to
- optimize the inline function expansion anyway. */
- assert (!obj_entry->dead);
- assert (obj_entry->internal_refs > 1);
- obj_entry->internal_refs--;
- pthread_mutex_unlock (&client->lock);
-
- return err;
-}
-
-
-/* End the use of the object CAP_USE->OBJ, which must be locked. */
-void
-hurd_cap_ctx_end_cap_use (hurd_cap_rpc_context_t rpc_ctx,
- struct hurd_cap_ctx_cap_use *cap_use)
-{
- _hurd_cap_obj_entry_t entry = cap_use->_obj_entry;
- hurd_cap_obj_t obj = entry->cap_obj;
- _hurd_cap_client_t client = rpc_ctx->client;
-
- /* Is this an additional use of the main capability object? */
- if (EXPECT_TRUE (obj != rpc_ctx->obj))
- /* No. */
- {
- hurd_cap_class_t cap_class = obj->cap_class;
-
- /* End the RPC on the object. */
- _hurd_cap_list_item_remove (&cap_use->_worker_obj);
- _hurd_cap_obj_cond_check (obj);
-
- if (cap_class != rpc_ctx->obj->cap_class)
- /* The capability object is in a different class from the primary
- capability object. */
- {
- pthread_mutex_lock (&cap_class->lock);
- _hurd_cap_list_item_remove (&cap_use->_worker_class);
- _hurd_cap_class_cond_check (cap_class);
- pthread_mutex_unlock (&cap_class->lock);
- }
- }
-
- hurd_cap_obj_unlock (obj);
-
- /* You are not allowed to revoke a capability while there are
- pending RPCs on it. This is the reason why we know that there
- must be at least one extra internal reference. FIXME: For
- cleanliness, this could still call some inline function that does
- the decrement. The assert can be a hint to the compiler to
- optimize the inline function expansion anyway. */
-
- pthread_mutex_lock (&client->lock);
- assert (!entry->dead);
- assert (entry->internal_refs > 1);
- entry->internal_refs--;
- pthread_mutex_unlock (&client->lock);
-}
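For reference, a handler that needs a second capability besides the one the RPC arrived on would use the pair above roughly as follows.  This is a hypothetical fragment, not code from the removed tree; use_second_cap and its parameters are invented names, and stack allocation of the opaque cap-use structure via alloca is an assumption.

#include <alloca.h>
#include <errno.h>
#include <hurd/cap-server.h>

static error_t
use_second_cap (hurd_cap_rpc_context_t rpc_ctx, hurd_cap_handle_t handle,
                hurd_cap_class_t required_class)
{
  struct hurd_cap_ctx_cap_use *cap_use;
  hurd_cap_obj_t obj;
  error_t err;

  /* The structure is opaque; reserve the number of bytes reported by
     hurd_cap_ctx_size.  */
  cap_use = alloca (hurd_cap_ctx_size ());

  err = hurd_cap_ctx_start_cap_use (rpc_ctx, handle, required_class,
                                    cap_use, &obj);
  if (err)
    /* EINVAL, ENOENT, EBADF or EDOM, as documented in the comment above.  */
    return err;

  /* OBJ is now locked and shielded from inhibition; operate on the
     user object behind it here, e.g. via hurd_cap_obj_to_user.  */

  hurd_cap_ctx_end_cap_use (rpc_ctx, cap_use);
  return 0;
}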
diff --git a/libhurd-cap-server/headers.m4 b/libhurd-cap-server/headers.m4
deleted file mode 100644
index 886469e..0000000
--- a/libhurd-cap-server/headers.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-# headers.m4 - Autoconf snippets to install links for header files.
-# Copyright 2003 Free Software Foundation, Inc.
-# Written by Marcus Brinkmann <marcus@gnu.org>.
-#
-# This file is free software; as a special exception the author gives
-# unlimited permission to copy and/or distribute it, with or without
-# modifications, as long as this notice is preserved.
-#
-# This file is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-AC_CONFIG_LINKS([include/hurd/cap-server.h:libhurd-cap-server/cap-server.h])
diff --git a/libhurd-cap-server/obj-copy-out.c b/libhurd-cap-server/obj-copy-out.c
deleted file mode 100644
index c0e8b93..0000000
--- a/libhurd-cap-server/obj-copy-out.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/* obj-copy-out.c - Copy out a capability to a client.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Copy out a capability for the capability OBJ to the user CLIENT.
- Returns the capability ID (valid only for this user) in *R_ID, or
- an error. OBJ must be locked. Note: No internal reference for
- this capability is allocated for the caller. */
-error_t
-_hurd_cap_obj_copy_out (hurd_cap_obj_t obj, hurd_cap_bucket_t bucket,
- _hurd_cap_client_t client, _hurd_cap_id_t *r_id)
-{
- void *new_entry;
- _hurd_cap_obj_entry_t entry;
-
- pthread_mutex_lock (&client->lock);
- entry = (_hurd_cap_obj_entry_t) hurd_ihash_find (&client->caps_reverse,
- (hurd_ihash_key_t) obj);
-
- if (entry)
- {
- entry->external_refs++;
- *r_id = entry->id;
- pthread_mutex_unlock (&client->lock);
- return 0;
- }
- else
- {
- _hurd_cap_obj_entry_t entry_check;
- error_t err;
-
- pthread_mutex_unlock (&client->lock);
- err = hurd_slab_alloc (&_hurd_cap_obj_entry_space, &new_entry);
- if (err)
- return err;
- entry = new_entry;
-
- entry->cap_obj = obj;
- /* ID is filled in when adding the object to the table. */
- /* CLIENT_ITEM is filled after the object has been entered. */
- /* DEAD is 0 for initialized objects. */
- /* INTERNAL_REFS is 1 for initialized objects. */
- /* EXTERNAL_REFS is 1 for initialized objects. */
-
- pthread_mutex_lock (&client->lock);
- entry_check = hurd_ihash_find (&client->caps_reverse,
- (hurd_ihash_key_t) obj);
- if (entry_check)
- {
- /* Somebody else was faster. */
- entry_check->external_refs++;
- *r_id = entry_check->id;
- pthread_mutex_unlock (&client->lock);
- hurd_slab_dealloc (&_hurd_cap_obj_entry_space, entry);
- return 0;
- }
-
- /* Add the entry to the cap table of the client. */
- err = hurd_table_enter (&client->caps, &entry, &entry->id);
- if (err)
- {
- pthread_mutex_unlock (&client->lock);
- hurd_slab_dealloc (&_hurd_cap_obj_entry_space, entry);
- return err;
- }
- err = hurd_ihash_add (&client->caps_reverse,
- (hurd_ihash_key_t) obj, entry);
- if (err)
- {
- hurd_table_remove (&client->caps, entry->id);
- pthread_mutex_unlock (&client->lock);
- hurd_slab_dealloc (&_hurd_cap_obj_entry_space, entry);
- return err;
- }
-
- *r_id = entry->id;
- pthread_mutex_unlock (&client->lock);
-
- /* Add the object to the list. */
- _hurd_cap_list_item_add (&obj->clients, &entry->client_item);
-
- /* Add a reference for the internal reference of the capability
- entry to the capability object. */
- obj->refs++;
-
- /* FIXME: Should probably use spin lock here, or so. */
- pthread_mutex_lock (&bucket->lock);
- bucket->nr_caps++;
- pthread_mutex_unlock (&bucket->lock);
-
- return 0;
- }
-}
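
The copy-out above follows a common pattern: look up under the lock, allocate with the lock dropped, then re-check under the lock and roll the allocation back if another thread won the race.  A minimal sketch of that pattern, with hypothetical cache_find/cache_insert/slot_new/slot_free helpers standing in for the ihash, table and slab calls used here:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical helpers standing in for hurd_ihash_find,
       hurd_ihash_add and the slab allocator.  */
    extern void *cache_find (void *key);
    extern void cache_insert (void *key, void *slot);
    extern void *slot_new (void);
    extern void slot_free (void *slot);

    int
    cache_get (void *key, void **r_slot)
    {
      void *slot;

      pthread_mutex_lock (&cache_lock);
      slot = cache_find (key);
      pthread_mutex_unlock (&cache_lock);
      if (slot)
        {
          *r_slot = slot;
          return 0;
        }

      /* Allocate without holding the lock.  */
      slot = slot_new ();
      if (!slot)
        return ENOMEM;

      pthread_mutex_lock (&cache_lock);
      *r_slot = cache_find (key);
      if (*r_slot)
        {
          /* Somebody else was faster; roll back our allocation.  */
          pthread_mutex_unlock (&cache_lock);
          slot_free (slot);
          return 0;
        }
      cache_insert (key, slot);
      *r_slot = slot;
      pthread_mutex_unlock (&cache_lock);
      return 0;
    }
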
diff --git a/libhurd-cap-server/obj-dealloc.c b/libhurd-cap-server/obj-dealloc.c
deleted file mode 100644
index de4421e..0000000
--- a/libhurd-cap-server/obj-dealloc.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/* obj-dealloc.c - Deallocate a capability object.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <assert.h>
-#include <pthread.h>
-
-#include "cap-server-intern.h"
-
-
-/* Deallocate the capability object OBJ, which must be locked and have
- no more references. */
-void
-_hurd_cap_obj_dealloc (hurd_cap_obj_t obj)
-{
- hurd_cap_class_t cap_class = obj->cap_class;
-
- /* First let the user do their reinitialization. */
- (*cap_class->obj_reinit) (cap_class, obj);
-
- /* Now do our part of the reinitialization. */
- assert (obj->refs == 0);
- assert (obj->state == _HURD_CAP_STATE_GREEN);
- assert (obj->pending_rpcs == NULL);
- assert (obj->clients == NULL);
- hurd_cap_obj_unlock (obj);
-
- hurd_slab_dealloc (&cap_class->obj_space, obj);
-}
diff --git a/libhurd-cap-server/obj-drop.c b/libhurd-cap-server/obj-drop.c
deleted file mode 100644
index 6942ab1..0000000
--- a/libhurd-cap-server/obj-drop.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* obj-drop.c - Drop a reference to a capability object.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include "cap-server-intern.h"
-
-
-/* Remove one reference for the capability object OBJ, which must be
- locked, and will be unlocked when the function returns. If this
- was the last user of this object, the object is deallocated. */
-void
-hurd_cap_obj_drop (hurd_cap_obj_t obj)
-{
- _hurd_cap_obj_drop (obj);
-}
diff --git a/libhurd-cap-server/obj-entry-space.c b/libhurd-cap-server/obj-entry-space.c
deleted file mode 100644
index 5eb33ba..0000000
--- a/libhurd-cap-server/obj-entry-space.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/* obj-entry-space.c - The capability object entry slab space.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-
-#include <hurd/slab.h>
-
-#include "cap-server-intern.h"
-
-
-static error_t
-_hurd_cap_obj_entry_constructor (void *hook, void *buffer)
-{
- _hurd_cap_obj_entry_t entry = (_hurd_cap_obj_entry_t) buffer;
-
- /* The members cap_obj and client_item are initialized at
- instantiation time. */
-
- entry->dead = 0;
- entry->internal_refs = 1;
- entry->external_refs = 1;
-
- return 0;
-}
-
-
-/* The global slab for all capability entries. */
-struct hurd_slab_space _hurd_cap_obj_entry_space
- = HURD_SLAB_SPACE_INITIALIZER (struct _hurd_cap_obj_entry, NULL, NULL,
- _hurd_cap_obj_entry_constructor, NULL, NULL);
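
For reference, entries from this space are obtained and returned with hurd_slab_alloc and hurd_slab_dealloc, as the copy-out code above does.  A minimal usage sketch, assuming the declarations from cap-server-intern.h and trimming error handling to the essentials:

    #include <hurd/slab.h>

    #include "cap-server-intern.h"

    static error_t
    obj_entry_example (hurd_cap_obj_t obj, _hurd_cap_obj_entry_t *r_entry)
    {
      void *buffer;
      error_t err;

      /* Take a pre-constructed entry from the global slab; DEAD,
         INTERNAL_REFS and EXTERNAL_REFS already hold the initial
         values set by the constructor above.  */
      err = hurd_slab_alloc (&_hurd_cap_obj_entry_space, &buffer);
      if (err)
        return err;

      *r_entry = buffer;
      (*r_entry)->cap_obj = obj;   /* Per-instance field, set by the user.  */
      return 0;
    }

    /* An entry that is no longer needed goes back to the slab with
       hurd_slab_dealloc (&_hurd_cap_obj_entry_space, entry);  */
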
diff --git a/libhurd-cap-server/obj-inhibit.c b/libhurd-cap-server/obj-inhibit.c
deleted file mode 100644
index 22455b3..0000000
--- a/libhurd-cap-server/obj-inhibit.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/* obj-inhibit.c - Inhibit RPCs on a capability object.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include "cap-server-intern.h"
-
-
-/* Inhibit all RPCs on the capability object CAP_OBJ (which must not
- be locked). You _must_ follow up with a hurd_cap_obj_resume
- operation, and hold at least one reference to the object
- continuously until you did so. */
-error_t
-hurd_cap_obj_inhibit (hurd_cap_obj_t obj)
-{
- hurd_cap_class_t cap_class = obj->cap_class;
- error_t err;
-
- /* First take the class-wide lock for conditions on capability
- object states. */
- pthread_mutex_lock (&cap_class->obj_cond_lock);
-
- /* Then lock the object to check its state. */
- pthread_mutex_lock (&obj->lock);
-
- /* First wait until any other inhibitor has resumed the capability
- object. This ensures that capability object inhibitions are
- fully serialized (per capability object). */
- while (obj->state != _HURD_CAP_STATE_GREEN)
- {
- pthread_mutex_unlock (&obj->lock);
- err = hurd_cond_wait (&cap_class->obj_cond,
- &cap_class->obj_cond_lock);
- if (err)
- {
- /* We have been canceled. */
- pthread_mutex_unlock (&cap_class->obj_cond_lock);
- return err;
- }
- pthread_mutex_lock (&obj->lock);
- }
-
- /* Now it is our turn to inhibit the capability object. */
- obj->cond_waiter = pthread_self ();
-
- if (_hurd_cap_obj_cond_busy (obj))
- {
- _hurd_cap_list_item_t pending_rpc = obj->pending_rpcs;
-
- /* There are still pending RPCs (beside us). Cancel them. */
- while (pending_rpc)
- {
- if (pending_rpc->thread != obj->cond_waiter)
- pthread_cancel (pending_rpc->thread);
- pending_rpc = pending_rpc->next;
- }
-
- /* Indicate that we would like to know when they have gone. */
- obj->state = _HURD_CAP_STATE_YELLOW;
-
- /* The last one will shut the door. */
- do
- {
- pthread_mutex_unlock (&obj->lock);
- err = hurd_cond_wait (&cap_class->obj_cond,
- &cap_class->obj_cond_lock);
- if (err)
- {
- /* We have been canceled ourselves. Give up. */
- obj->state = _HURD_CAP_STATE_GREEN;
- pthread_mutex_unlock (&cap_class->obj_cond_lock);
- return err;
- }
- pthread_mutex_lock (&obj->lock);
- }
- while (obj->state != _HURD_CAP_STATE_RED);
- }
- else
- obj->state = _HURD_CAP_STATE_RED;
-
- /* Now all pending RPCs have been canceled and are completed (except
- us), and all incoming RPCs are inhibited. */
- pthread_mutex_unlock (&obj->lock);
- pthread_mutex_unlock (&cap_class->obj_cond_lock);
-
- return 0;
-}
-
-
-/* Resume RPCs on the capability object OBJ and wake up all
- waiters. */
-void
-hurd_cap_obj_resume (hurd_cap_obj_t obj)
-{
- hurd_cap_class_t cap_class = obj->cap_class;
-
- pthread_mutex_lock (&cap_class->obj_cond_lock);
- pthread_mutex_lock (&obj->lock);
-
- obj->state = _HURD_CAP_STATE_GREEN;
-
- /* Broadcast the change to all potential waiters. */
- pthread_cond_broadcast (&cap_class->obj_cond);
-
- pthread_mutex_unlock (&obj->lock);
- pthread_mutex_unlock (&cap_class->obj_cond_lock);
-}
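
As the comments above require, hurd_cap_obj_inhibit must always be paired with hurd_cap_obj_resume, with at least one reference to the object held for the whole interval.  A minimal sketch of that usage, with do_maintenance standing in for whatever work needs the object quiesced:

    #include "cap-server-intern.h"

    static error_t
    with_obj_quiesced (hurd_cap_obj_t obj,
                       void (*do_maintenance) (hurd_cap_obj_t))
    {
      error_t err;

      /* The caller holds a reference to OBJ; OBJ is not locked.  */
      err = hurd_cap_obj_inhibit (obj);
      if (err)
        /* We were canceled while waiting for pending RPCs to drain.  */
        return err;

      /* All RPCs on OBJ are now inhibited (_HURD_CAP_STATE_RED).  */
      (*do_maintenance) (obj);

      /* Always resume, waking every thread blocked on the class
         condition.  */
      hurd_cap_obj_resume (obj);
      return 0;
    }
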
diff --git a/libhurd-cap-server/table.c b/libhurd-cap-server/table.c
deleted file mode 100644
index 8a0acd3..0000000
--- a/libhurd-cap-server/table.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/* table.c - Table abstraction implementation.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <assert.h>
-#include <string.h>
-
-#include "table.h"
-
-
-/* Initialize the table TABLE. */
-error_t
-hurd_table_init (hurd_table_t table, unsigned int entry_size)
-{
-  assert (entry_size >= sizeof (void *));
-
- *table = (struct hurd_table) HURD_TABLE_INITIALIZER (entry_size);
- return 0;
-}
-
-
-/* Destroy the table TABLE. */
-void
-hurd_table_destroy (hurd_table_t table)
-{
- if (table->data)
- free (table->data);
-}
-
-
-/* The initial table size. */
-#define TABLE_START_SIZE 4
-
-/* Add the table element DATA to the table TABLE. The index for this
- element is returned in R_IDX. Note that the data is added by
- copying ENTRY_SIZE bytes into the table (the ENTRY_SIZE parameter
- was provided at table initialization time). */
-error_t
-hurd_table_enter (hurd_table_t table, void *data, unsigned int *r_idx)
-{
- unsigned int idx;
-
- if (table->used == table->size)
- {
- unsigned int size_new = table->size ? 2 * table->size : TABLE_START_SIZE;
- void *data_new;
-
- data_new = realloc (table->data, size_new * table->entry_size);
- if (!data_new)
- return errno;
-
- table->first_free = table->size;
- table->data = data_new;
- table->size = size_new;
- }
-
- for (idx = table->first_free; idx < table->init_size; idx++)
- if (_HURD_TABLE_ENTRY_LOOKUP (table, idx) == HURD_TABLE_EMPTY)
- break;
-
- /* The following setting for FIRST_FREE is safe, because if this was
- the last table entry, then the table is full and we will grow the
- table the next time we are called (if no elements are removed in
-     the meantime).  */
- table->first_free = idx + 1;
-
- if (idx == table->init_size)
- table->init_size++;
-
- memcpy (HURD_TABLE_LOOKUP (table, idx), data, table->entry_size);
- table->used++;
- *r_idx = idx;
- return 0;
-}
-
diff --git a/libhurd-cap-server/table.h b/libhurd-cap-server/table.h
deleted file mode 100644
index 4278466..0000000
--- a/libhurd-cap-server/table.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* table.h - Table abstraction interface.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef _HURD_TABLE_H
-#define _HURD_TABLE_H 1
-
-#include <errno.h>
-#include <stdlib.h>
-#include <assert.h>
-
-
-/* The hurd_table data type is a fancy array. At initialization time,
- you have to provide the size ENTRY_SIZE of each table entry. When
- you enter an element, you get an index number in return. This
- index can be used for fast lookup of table elements. You access
-   the table elements through pointers to the beginning of each
- block of ENTRY_SIZE bytes.
-
- Embedded at the beginning of the ENTRY_SIZE bytes in each slot is a
- void pointer. You can use this void pointer freely for your own
- purpose with the following restriction: In a used table entry, it
- must never be NULL. NULL at the beginning of a table entry
- indicates an unused (free) table entry.
-
- The table will grow (and eventually shrink, not yet implemented)
- automatically. New elements are always allocated from the
- beginning of the table. This means that when a new element is
- added, the free slot with the lowest index is always used. This
- makes slot usage predictable and attempts to prevent fragmentation
- and sparse usage.
-
- Note that tables, unlike hashes, can not be reorganized, because
- the index is not stable under reorganization.
-
- Of all operations supported, only lookup is immediate. Entering
- new elements is usually fast, too, unless the first free slot is
- unknown and has to be searched for, or there are no more free slots
- and the table has to be enlarged.
-
- Iterating over the used elements of the table is always
- of the order of the table size.
-
- In the future, removing an element can also shrink the table. In
- order to be able to do this, the implementation keeps track of the
- last used slot. For this reason, the remove operation is sometimes
- not immediate. */
-
-
-/* Because the first element in each table entry is a pointer, the
- table entry should be naturally aligned. */
-#define _HURD_TABLE_ALIGN(x) \
- (((x) + sizeof (void *) - 1) & ~(sizeof (void *) - 1))
-
-
-/* The value used for empty table entries. */
-#define HURD_TABLE_EMPTY (NULL)
-
-struct hurd_table
-{
- /* The size of one entry. Must at least be sizeof (void *). At the
- beginning of each entry, a void * should be present that is
- HURD_TABLE_EMPTY for unused elements and something else for used
- table elements. */
- unsigned int entry_size;
-
- /* The number of allocated table entries. */
- unsigned int size;
-
- /* The number of table entries that are initialized. */
- unsigned int init_size;
-
- /* The number of used table entries. */
- unsigned int used;
-
- /* The index of the lowest entry that is unused. */
- unsigned int first_free;
-
- /* The index after the highest entry that is used. */
- unsigned int last_used;
-
- /* The table data. */
- char *data;
-};
-typedef struct hurd_table *hurd_table_t;
-
-
-#define HURD_TABLE_INITIALIZER(size_of_one) \
- { .entry_size = _HURD_TABLE_ALIGN (size_of_one), .size = 0, \
- .init_size = 0, .used = 0, .first_free = 0, .last_used = 0, \
- .data = NULL }
-
-/* Fast accessor without range check. */
-#define HURD_TABLE_LOOKUP(table, idx) \
- (&(table)->data[(idx) * (table)->entry_size])
-
-/* This is an lvalue for the pointer embedded in the table entry. */
-#define _HURD_TABLE_ENTRY(entry) (*(void **) (entry))
-
-#define _HURD_TABLE_ENTRY_LOOKUP(table, idx) \
- _HURD_TABLE_ENTRY (HURD_TABLE_LOOKUP (table, idx))
-
-
-/* Initialize the table TABLE. */
-error_t hurd_table_init (hurd_table_t table, unsigned int entry_size);
-
-
-/* Destroy the table TABLE. */
-void hurd_table_destroy (hurd_table_t table);
-
-
-/* Add the table element DATA to the table TABLE. The index for this
- element is returned in R_IDX. Note that the data is added by
- copying ENTRY_SIZE bytes into the table (the ENTRY_SIZE parameter
- was provided at table initialization time). */
-error_t hurd_table_enter (hurd_table_t table, void *data, unsigned int *r_idx);
-
-
-/* Lookup the table element with the index IDX in the table TABLE. If
- there is no element with this index, return NULL. Otherwise a
- pointer to the table entry is returned. */
-static inline void *
-hurd_table_lookup (hurd_table_t table, unsigned int idx)
-{
- void *result;
-
- if (idx >= table->init_size)
- return NULL;
-
- result = HURD_TABLE_LOOKUP (table, idx);
- if (_HURD_TABLE_ENTRY (result) == HURD_TABLE_EMPTY)
- return NULL;
-
- return result;
-}
-
-
-/* Remove the table element with the index IDX from the table
- TABLE. */
-static inline void
-hurd_table_remove (hurd_table_t table, unsigned int idx)
-{
- void *entry;
-
- assert (idx < table->init_size);
-
- entry = HURD_TABLE_LOOKUP (table, idx);
- assert (_HURD_TABLE_ENTRY (entry) != HURD_TABLE_EMPTY);
-
- _HURD_TABLE_ENTRY (entry) = HURD_TABLE_EMPTY;
-
- if (idx < table->first_free)
- table->first_free = idx;
-
- if (idx == table->last_used - 1)
- while (--table->last_used > 0)
-      if (_HURD_TABLE_ENTRY_LOOKUP (table, table->last_used - 1)
-          != HURD_TABLE_EMPTY)
-        break;
-
- table->used--;
-}
-
-
-/* Iterate over all elements in the table. You use this macro
- with a block, for example like this:
-
- error_t err;
- HURD_TABLE_ITERATE (table, idx)
- {
- err = foo (idx);
- if (err)
- break;
- }
- if (err)
- cleanup_and_return ();
-
- Or even like this:
-
- HURD_TABLE_ITERATE (ht, idx)
- foo (idx);
-
- The block will be run for every used element in the table. Because
- IDX is already a verified valid table index, you can lookup the
- table entry with the fast macro HURD_TABLE_LOOKUP. */
-#define HURD_TABLE_ITERATE(table, idx) \
- for (unsigned int idx = 0; idx < (table)->init_size; idx++) \
- if (_HURD_TABLE_ENTRY_LOOKUP ((table), (idx)) != HURD_TABLE_EMPTY)
-
-#endif /* _HURD_TABLE_H */
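
Putting the interface above together, a typical user looks roughly like the following sketch; struct item and its contents are made up for illustration, with the required non-NULL pointer as the first member:

    #include <stdio.h>

    #include "table.h"

    struct item
    {
      void *used;    /* First member: non-NULL marks the slot as used.  */
      int value;
    };

    static int marker;  /* Anything non-NULL will do as the used marker.  */
    static struct hurd_table items
      = HURD_TABLE_INITIALIZER (sizeof (struct item));

    static error_t
    items_example (void)
    {
      struct item it = { .used = &marker, .value = 42 };
      struct item *slot;
      unsigned int idx;
      error_t err;

      /* The entry is copied into the table; IDX comes back for lookups.  */
      err = hurd_table_enter (&items, &it, &idx);
      if (err)
        return err;

      slot = hurd_table_lookup (&items, idx);
      if (slot)
        printf ("slot %u holds %d\n", idx, slot->value);

      /* Visit every used slot; the index is guaranteed valid here.  */
      HURD_TABLE_ITERATE (&items, i)
        ((struct item *) HURD_TABLE_LOOKUP (&items, i))->value++;

      hurd_table_remove (&items, idx);
      return 0;
    }
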
diff --git a/libhurd-cap-server/task-death.c b/libhurd-cap-server/task-death.c
deleted file mode 100644
index 855ed7a..0000000
--- a/libhurd-cap-server/task-death.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/* task-death.c - Task death notifications, implementation.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <pthread.h>
-
-#include <hurd/types.h>
-#include "task-death.h"
-
-
-/* A lock that protects the linked list. It also is held when
- callback handlers are called. */
-pthread_mutex_t hurd_task_death_notify_lock = PTHREAD_MUTEX_INITIALIZER;
-
-/* The linked list of callback handlers. */
-struct hurd_task_death_notify_list_item *hurd_task_death_notify_list;
-
-
-static void *
-task_death_manager (void *unused)
-{
-  /* FIXME.  Needs to be implemented when the task server supports
- it. Do the following:
-
- unsigned int nr_task_ids;
- unsigned int i;
- hurd_task_id_t task_ids[nr_task_ids];
-
- struct hurd_task_death_notify_list_item *item;
-
- pthread_mutex_lock (&hurd_task_death_notify_lock);
- item = hurd_task_death_notify_list;
- while (item)
- {
- for (i = 0; i < nr_task_ids; i++)
-	   (*item->notify_handler) (item->hook, task_ids[i]);
- item = item->next;
- }
- pthread_mutex_unlock (&hurd_task_death_notify_lock);
-
- The only bit missing is the RPC loop to retrieve the dead task ids
- from the task server. This can be a tight loop. */
-
- return 0;
-}
-
-
-/* Start task death notifications. Must be called once at startup. */
-error_t
-hurd_task_death_notify_start (void)
-{
-  /* FIXME.  Needs to be implemented when the task server supports it.
- Start the task_death_manager thread. */
-
- return 0;
-}
diff --git a/libhurd-cap-server/task-death.h b/libhurd-cap-server/task-death.h
deleted file mode 100644
index c4b72a8..0000000
--- a/libhurd-cap-server/task-death.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* task-death.h - Task death notifications, interface.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef _HURD_TASK_DEATH_H
-#define _HURD_TASK_DEATH_H 1
-
-#include <pthread.h>
-
-
-/* We need to keep track of task deaths, because for IPC security we
- hold task info capabilities which block reuse of the respective
- task ID. At task death, we have to release these task info
- capabilities so they become free for reuse. The task server
- provides an interface to get the task IDs of all dead tasks to
- which we still hold task info capabilities.
-
- The following convention applies: Before you start allocating task
- info capabilities, you must register a task death notify handler.
- While you are requesting new task info capabilities and registering
- it with your notify handler, you must take the
- hurd_task_death_notify_lock to prevent task death notifications
- from being processed (FIXME: Write a wrapper function for the task
- server RPC to do this). You can release task info capabilities at
- any time. However, if your notify handler is called, you MUST
- release any task info capability you hold for that task ID. */
-
-
-/* The type of a function callback that you can use to be informed
- about task deaths. */
-typedef void (task_death_notify_t) (void *hook, hurd_task_id_t task_id);
-
-/* The struct you have to use to add your own notification
- handler. */
-struct hurd_task_death_notify_list_item
-{
- /* The following two members are internal. */
- struct hurd_task_death_notify_list_item *next;
- struct hurd_task_death_notify_list_item **prevp;
-
- /* Your callback handler. */
- task_death_notify_t *notify_handler;
-
- /* This is passed as the first argument to your callback
- handler. */
- void *hook;
-};
-
-
-/* A lock that protects the linked list. It also is held when
- callback handlers are called. */
-extern pthread_mutex_t hurd_task_death_notify_lock;
-
-/* The linked list of callback handlers. */
-extern struct hurd_task_death_notify_list_item *hurd_task_death_notify_list;
-
-
-/* Start task death notifications. Must be called once at startup. */
-error_t hurd_task_death_notify_start (void);
-
-
-/* Add the callback handler ITEM to the list. */
-static inline void
-hurd_task_death_notify_add (struct hurd_task_death_notify_list_item *item)
-{
- pthread_mutex_lock (&hurd_task_death_notify_lock);
- if (hurd_task_death_notify_list)
- hurd_task_death_notify_list->prevp = &item->next;
- item->prevp = &hurd_task_death_notify_list;
- item->next = hurd_task_death_notify_list;
- hurd_task_death_notify_list = item;
- pthread_mutex_unlock (&hurd_task_death_notify_lock);
-};
-
-
-/* Remove the callback handler ITEM from the list. */
-static inline void
-hurd_task_death_notify_remove (struct hurd_task_death_notify_list_item *item)
-{
- pthread_mutex_lock (&hurd_task_death_notify_lock);
- if (item->next)
- item->next->prevp = item->prevp;
- *(item->prevp) = item->next;
- pthread_mutex_unlock (&hurd_task_death_notify_lock);
-};
-
-
-/* Suspend processing task death notifications. Call this while
- acquiring new task info capabilities and registering them with your
- notify handler. */
-static inline void
-hurd_task_death_notify_suspend (void)
-{
- pthread_mutex_lock (&hurd_task_death_notify_lock);
-}
-
-/* Resume processing task death notifications.  Call this after
- acquiring new task info capabilities and registering them with your
- notify handler. */
-static inline void
-hurd_task_death_notify_resume (void)
-{
- pthread_mutex_unlock (&hurd_task_death_notify_lock);
-}
-
-#endif /* _HURD_TASK_DEATH_H */
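
Following the convention described above, a user of this interface registers its handler before allocating any task info capabilities and suspends notification processing while acquiring new ones.  A minimal sketch, with the handler body reduced to a diagnostic:

    #include <errno.h>
    #include <stdio.h>

    #include <hurd/types.h>
    #include "task-death.h"

    static void
    my_task_died (void *hook, hurd_task_id_t task_id)
    {
      /* Per the convention above, we MUST release any task info
         capability we still hold for TASK_ID here.  */
      printf ("task %lu died\n", (unsigned long) task_id);
    }

    static struct hurd_task_death_notify_list_item my_notify =
      { .notify_handler = my_task_died, .hook = NULL };

    static error_t
    watch_tasks (void)
    {
      error_t err;

      err = hurd_task_death_notify_start ();
      if (err)
        return err;

      /* Register the handler before allocating task info capabilities.  */
      hurd_task_death_notify_add (&my_notify);

      /* Keep notifications from being processed while acquiring new
         task info capabilities and recording them for the handler.  */
      hurd_task_death_notify_suspend ();
      /* ... hurd_task_info_create calls would go here ...  */
      hurd_task_death_notify_resume ();

      return 0;
    }
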
diff --git a/libhurd-cap/ChangeLog b/libhurd-cap/ChangeLog
deleted file mode 100644
index e23fa49..0000000
--- a/libhurd-cap/ChangeLog
+++ /dev/null
@@ -1,6 +0,0 @@
-2005-01-07 Neal H. Walfield <neal@gnu.org>
-
- * cap.c (hurd_cap_init): Supply the allocate_buffer and
- deallocate_buffer arguments to hurd_slab_create to conform with
- the new semantics.
-
diff --git a/libhurd-cap/Makefile.am b/libhurd-cap/Makefile.am
deleted file mode 100644
index fcd1400..0000000
--- a/libhurd-cap/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-# Makefile.am - Makefile template for libhurd-cap.
-# Copyright (C) 2003 Free Software Foundation, Inc.
-# Written by Marcus Brinkmann.
-#
-# This file is part of the GNU Hurd.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free
-# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-# 02111-1307 USA.
-
-lib_LIBRARIES = libhurd-cap.a
-
-includehurddir = $(includedir)/hurd
-includehurd_HEADERS = cap.h
-
-AM_CPPFLAGS = -I$(top_builddir)/include
-libhurd_cap_a_SOURCES = cap.h cap.c cap-user.c #cap-move.c
diff --git a/libhurd-cap/cap-intern.h b/libhurd-cap/cap-intern.h
deleted file mode 100644
index 69ba8f9..0000000
--- a/libhurd-cap/cap-intern.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <hurd/slab.h>
-
-/* The slab space for hurd_cap_t objects. */
-extern hurd_slab_space_t cap_space;
diff --git a/libhurd-cap/cap-move.c b/libhurd-cap/cap-move.c
deleted file mode 100644
index df829fe..0000000
--- a/libhurd-cap/cap-move.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/* cap-move.c - Moving a capability reference from one task to another.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-
-/* FIXME: This is only some pseudo code to get the hang of it. */
-
-/* Sender side. */
-
-/* Send the capability CAP to DEST. */
-error_t
-cap_send (hurd_cap_t cap, hurd_cap_t dest, int copy)
-{
- error_t err;
- hurd_cap_scid_t cont_id;
-
- /* This is a low-level RPC to create a new reference container. */
- err = hurd_cap_server_create_ref_cont
- (cap,
- hurd_task_id_from_thread_id (hurd_cap_get_server_thread (dest)),
- &cont_id);
- if (err)
- return err;
-
- /* This is the actual RPC sending the reference in a message to DEST. */
- err = hurd_SOME_RPC (dest, ...,
- hurd_cap_get_server_thread (cap), cont_id,
- ...);
- if (err)
- {
- /* FIXME: If this fails, then we can only ignore it. */
- hurd_cap_server_destroy_ref_cont (cap, cont_id);
- return err;
- }
-
- /* We have to deallocate the reference container under all
- circumstances. In general, we could trust the server to do it
- automatically when the reference container got accepted, and the
- receiver of the capability indicates success. However, if there
- were a failure we would not know if the reference was received
- already, and as the container ID could be reused once it is
- released, we would have no way to find out. So this is a
- robustness issue, and not a security issue, that forces us to
- keep the container ID alive in the server and destroy it here
- unconditionally. */
- err = hurd_cap_server_destroy_ref_cont (cap, cont_id);
- if (err)
- return err;
-
- /* If we are moving the capability, we can deallocate it now. If we
- are copying the capability, we should of course not do that. */
- if (!copy)
- {
- err = hurd_cap_mod_refs (cap, -1);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-
-/* Receiver side. */
-
-/* Capabilities are received in normal message RPCs. This is the
- server stub of one of them.
-
- FIXME: For now this assumes that server_thread is not implemented
- by this task. */
-error_t
-cap_receive (l4_thread_id_t sender_thread,
- void *some_object, ..., l4_thread_id_t server_thread,
- hurd_cap_scid_t cont_id, ...)
-{
- error_t err;
- hurd_cap_sconn_t sconn;
- hurd_cap_t server_task_info = HURD_CAP_NULL;
- hurd_task_id_t sender_task = hurd_task_id_from_thread_id (sender_thread);
- hurd_cap_scid_t obj_id;
-
- /* We have a chance to inspect the thread ID now and decide if we
- want to accept the handle. */
-#if FOR_EXAMPLE_A_REAUTHENTICATION_REQUEST
- if (server_thread != hurd_cap_get_server_thread (hurd_my_auth ()))
- return EINVAL;
-#endif
-
- /* Acquire a reference to the server connection if it exists. */
- sconn = _hurd_cap_sconn_find (server_thread);
- if (sconn)
- pthread_mutex_unlock (&sconn->lock);
- else
- {
- /* If no connection to this server exists already, prepare to
- create a new one. The sender task ID tells the task server
- that we only want to get the info cap if the sender task
- still lives at the time it is created (because we rely on the
- sender to secure the server task ID). */
-
- /* FIXME: This probably should check for the server being the
- task server. However, the implementation could always
- guarantee that the task server has an SCONN object
- already, so the lookup above would be successful. */
- err = hurd_task_info_create (hurd_task_self (),
- hurd_task_id_from_thread_id (server_thread),
- sender_task,
- &server_task_info);
- if (err)
- return err;
- }
-
- /* This is a very low-level RPC to accept the reference from the
- server. */
- err = hurd_cap_server_accept_ref_cont (server_thread, sender_task,
- cont_id, &obj_id);
- if (err)
- {
- if (sconn)
- {
- pthread_mutex_lock (&sconn->lock);
- _hurd_cap_sconn_dealloc (sconn);
- }
- else
- hurd_cap_mod_refs (server_task_info, -1);
-
- return err;
- }
-
- /* This consumes our references to sconn and server_task_info, if
- any. */
- return _hurd_cap_sconn_enter (sconn, server_task_info,
- server_thread, obj_id);
-}
-
-
-/* The server side. */
-
-/* Containers are created in the sender's user list, but hold a
- reference for the receiver user list. The receiver user list also
- has a section with backreferences to all containers for this
- receiver.
-
- At look up time, the receiver user list can be looked up. Then the
- small list can be searched to verify the request. If it is
- verified, the entry can be removed. Then the sender can be looked
- up to access the real reference container item. The reference for
- the receiver user list should not be released because it will
- usually be consumed for the capability.
-
- Without the small list on the receiver user list side there could
- be DoS attacks where a malicious receiver constantly claims it
- wants to accept a container from another client and keeps the user
- list of that client locked during the verification attempts.
-
- First the sender container is created.
-
- Then the receiver container is created.
-
- Removal of containers works the other way round. This is the most
- robust way to do it, in case the sender destroys the container
- asynchronously with the receiver trying to accept the container.
- First an invalid send container (id) has to be allocated, then the
- receiver container has to be created, and then the sender container
- can be filled with the right receiver container id. */
-
-/* Create a reference container for DEST. */
-error_t
-hurd_cap_server_create_ref_cont_S (l4_thread_id_t sender_thread,
- void *object,
- hurd_task_id_t dest,
- hurd_cap_scid_t *cont_id)
-{
- error_t err;
-
- /* FIXME: Needs to be written in more detail. Here is a list:
-
-     1. Check if a user list for dest exists; if not, create one.  Use
-     the sender's task ID as a constraint to the task_info_create call, to
- ensure we don't create a task info cap if the sender dies in the
- meantime.
-
- Note: Order matters in the following two steps:
-
- 2. Register the container as a sending container with the sender user list.
-
- 3. Register the container as a receiving container with the dest
-     user list.  The dest user list should have its own small list just
- with containers, and this list should have its own lock. There is
- no precondition for this lock. A non-empty list implies one
- reference to the task info cap (no explicit reference is taken to
- avoid locking problems).
-
- This is how task death notifications should be handled:
-
- If the sender dies, remove the container on both sides, first on the
- receiver side, then on the sender side.
-
- If the receiver dies, remove the container on the receiving side,
- but keep the container on the sender side. Invalidate the container
- on the sender side, so it can not be used anymore (only
- destroyed). */
-}
-
-/* Accept a reference. */
-
-/* This is a very low-level RPC to accept the reference from the
- server. */
-error_t
-hurd_cap_server_accept_ref_cont (l4_thread_id_t sender_thread,
- hurd_task_id_t source,
- hurd_cap_scid_t cont_id,
- hurd_cap_scid_t *obj_id)
-{
- /* FIXME: Write this one. This is what should be done:
-
-     1. Look up the sender user list.  In that, look up the container with
-     the given cont id.  Check that this container comes from the task
-     SOURCE.  (This information should be stored in the sender user list,
-     so no lookup of the SOURCE user list is required up to this point.)
-
- 2. Now that the validity of the request is confirmed, remove the
- container ID from the receiver side. Unlock the sender user list.
-
- 3. Look up the SOURCE user list. Look up the container. Look up
- the capability that is wrapped by the container. Increase its
- reference. Invalidate the container, but keep it around. Unlock
- the SOURCE user list.
-
- 4. Enter the capability into the SENDER user list. Return its
- ID. */
-}
-
-
-error_t
-hurd_cap_server_destroy_ref_cont (hurd_cap_t cap, hurd_cap_scid_t cont_id)
-{
- /* To be written */
-}
diff --git a/libhurd-cap/cap-user.c b/libhurd-cap/cap-user.c
deleted file mode 100644
index 593d5b4..0000000
--- a/libhurd-cap/cap-user.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/* cap-user.c - User side of the capability implementation.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdlib.h>
-#include <pthread.h>
-#include <assert.h>
-#include <errno.h>
-#include <stdint.h>
-#include <l4/thread.h>
-
-#include <hurd/cap.h>
-
-#include "cap-intern.h"
-
-
-/* This hash table maps server thread IDs to server connections. */
-static struct hurd_ihash server_to_sconn
- = HURD_IHASH_INITIALIZER (HURD_IHASH_NO_LOCP);
-
-/* This lock protects SERVER_TO_SCONN. You can also lock server
- connection objects while holding this lock. */
-static pthread_mutex_t server_to_sconn_lock = PTHREAD_MUTEX_INITIALIZER;
-
-
-/* Deallocate one reference for SCONN, which must be locked.
- SERVER_TO_SCONN_LOCK is not locked. Afterwards, SCONN is unlocked
- (if it still exists). */
-void
-_hurd_cap_sconn_dealloc (hurd_cap_sconn_t sconn)
-{
- assert (sconn->refs > 0);
-
- sconn->refs--;
- if (sconn->refs > 0)
- {
- pthread_mutex_unlock (&sconn->lock);
- return;
- }
-
- /* We have to get SERVER_TO_SCONN_LOCK, but the locking order does
- not allow us to do it directly. So we release SCONN
- temporarily. */
- sconn->refs = 1;
- pthread_mutex_unlock (&sconn->lock);
- pthread_mutex_lock (&server_to_sconn_lock);
- pthread_mutex_lock (&sconn->lock);
- assert (sconn->refs > 0);
- sconn->refs--;
- if (sconn->refs > 0)
- {
- pthread_mutex_unlock (&sconn->lock);
- return;
- }
-
- /* Now we can remove the object. */
- hurd_ihash_remove (&server_to_sconn, sconn->server_thread);
- pthread_mutex_unlock (&server_to_sconn_lock);
-
- /* Finally, we can destroy it. */
- pthread_mutex_unlock (&sconn->lock);
- pthread_mutex_destroy (&sconn->lock);
- hurd_ihash_destroy (&sconn->id_to_cap);
- if (sconn->server_task_info)
- hurd_cap_deallocate (sconn->server_task_info);
- free (sconn);
-}
-
-
-/* Remove the entry for the capability ID SCID from the server
- connection SCONN. SCONN is locked. Afterwards, SCONN is
- unlocked. */
-void
-_hurd_cap_sconn_remove (hurd_cap_sconn_t sconn, l4_word_t scid)
-{
- /* Remove the capability object pointer, which is now invalid. */
- hurd_ihash_remove (&sconn->id_to_cap, scid);
-
- /* FIXME: The following should be some low level RPC to deallocate
- the capability on the server side. If it fails, then what can we
- do at this point? */
- hurd_cap_server_deallocate (sconn->server_thread, scid);
-
- _hurd_cap_sconn_dealloc (sconn);
-}
-
-
-/* Enter a new send capability provided by the server SERVER_THREAD
- (with the task ID reference SERVER_TASK_INFO) and the cap ID SCID.
- SCONN is the server connection for SERVER_THREAD, if known. It
- should be unlocked. If SCONN is NULL, then SERVER_TASK_INFO should
- be the task info capability for the server SERVER_THREAD, otherwise
- it must be HURD_CAP_NULL. Both, SCONN and SERVER_TASK_INFO, are
- consumed if used. If successful, the locked capability is returned
- with one (additional) reference in CAP. The server connection and
- capability object are created if necessary. */
-error_t
-_hurd_cap_sconn_enter (hurd_cap_sconn_t sconn_provided,
- hurd_task_info_t server_task_info,
- l4_thread_id_t server_thread, uint32_t scid,
- hurd_cap_t *cap)
-{
- hurd_cap_sconn_t sconn = sconn_provided;
- int sconn_created = 0;
-
- if (sconn)
- assert (l4_is_thread_equal (sconn->server_thread, server_thread));
- else
- {
- /* It might have become available by now. */
- pthread_mutex_lock (&server_to_sconn_lock);
- sconn = hurd_ihash_find (&server_to_sconn, server_thread);
- if (sconn)
- hurd_cap_deallocate (server_task_info);
- else
- {
- error_t err;
-
- sconn = malloc (sizeof (*sconn));
- if (!sconn)
- {
- pthread_mutex_unlock (&server_to_sconn_lock);
- hurd_cap_deallocate (server_task_info);
- return errno;
- }
- err = pthread_mutex_init (&sconn->lock, NULL);
- if (err)
- {
- free (sconn);
- pthread_mutex_unlock (&server_to_sconn_lock);
- hurd_cap_deallocate (server_task_info);
-	      return err;
- }
-
- hurd_ihash_init (&sconn->id_to_cap, HURD_IHASH_NO_LOCP);
- sconn->server_thread = server_thread;
- sconn->server_task_info = server_task_info;
- sconn->refs = 0;
-
- /* Enter the new server connection object. */
- err = hurd_ihash_add (&server_to_sconn, server_thread, sconn);
- if (err)
- {
- pthread_mutex_destroy (&sconn->lock);
- hurd_ihash_destroy (&sconn->id_to_cap);
- free (sconn);
- pthread_mutex_unlock (&server_to_sconn_lock);
- hurd_cap_deallocate (server_task_info);
-	      return err;
- }
- }
- }
- pthread_mutex_lock (&sconn->lock);
- pthread_mutex_unlock (&server_to_sconn_lock);
-
- (*cap) = hurd_ihash_find (&sconn->id_to_cap, scid);
-  if (!*cap)
- {
- error_t err = hurd_slab_alloc (cap_space, cap);
- if (err)
- {
- _hurd_cap_sconn_dealloc (sconn);
- return err;
- }
-
- (*cap)->sconn = sconn;
- (*cap)->scid = scid;
-#if 0
- (*cap)->dead_cb = NULL;
-#endif
-
- err = hurd_ihash_add (&sconn->id_to_cap, scid, *cap);
- if (err)
- {
- _hurd_cap_sconn_dealloc (sconn);
- hurd_slab_dealloc (cap_space, *cap);
- return err;
- }
- }
- pthread_mutex_lock (&(*cap)->lock);
- (*cap)->srefs++;
- /* We have to add a reference for the capability we have added,
- unless we are consuming the reference that was provided. */
- if (!sconn_provided)
- sconn->refs++;
- pthread_mutex_unlock (&sconn->lock);
-
- return 0;
-}
diff --git a/libhurd-cap/cap.c b/libhurd-cap/cap.c
deleted file mode 100644
index c5ed161..0000000
--- a/libhurd-cap/cap.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdlib.h>
-#include <assert.h>
-#include <errno.h>
-#include <stdint.h>
-
-#include <pthread.h>
-
-#include <hurd/cap.h>
-
-#include "cap-intern.h"
-
-
-/* The slab space for capability objects. */
-hurd_slab_space_t cap_space;
-
-
-/* Initialize a new capability, allocated by the slab allocator. */
-static error_t
-cap_constructor (void *hook, void *buffer)
-{
- hurd_cap_t cap = (hurd_cap_t) buffer;
- error_t err;
-
- err = pthread_mutex_init (&cap->lock, NULL);
- if (err)
- return err;
-
- cap->srefs = 0;
- cap->orefs = 0;
-
- /* The other data is filled in by the creator. */
- return 0;
-}
-
-
-/* Release all resources allocated by the capability, which is in its
- freshly initialized state. */
-static void
-cap_destructor (void *hook, void *buffer)
-{
- hurd_cap_t cap = (hurd_cap_t) buffer;
-
- assert (cap->srefs == 0);
- assert (cap->orefs == 0);
-
- pthread_mutex_destroy (&cap->lock);
-}
-
-
-/* Initialize the capability system. */
-error_t
-hurd_cap_init (void)
-{
- return hurd_slab_create (sizeof (struct hurd_cap), 0, NULL, NULL,
- cap_constructor, cap_destructor, NULL,
- &cap_space);
-}
-
-
-/* Modify the number of send references for the capability CAP by
- DELTA. */
-error_t
-hurd_cap_mod_refs (hurd_cap_t cap, int delta)
-{
- hurd_cap_sconn_t sconn;
-
- pthread_mutex_lock (&cap->lock);
-
- /* Verify that CAP->srefs is not 0 and will not become negative. */
- if (cap->srefs == 0 || (delta < 0 && cap->srefs < -delta))
- {
- pthread_mutex_unlock (&cap->lock);
- return EINVAL;
- }
-
- /* Verify that CAP->srefs will not overflow. */
- if (delta > 0 && cap->srefs > UINT32_MAX - delta)
- {
- pthread_mutex_unlock (&cap->lock);
- return EOVERFLOW;
- }
-
- cap->srefs += delta;
- if (cap->srefs != 0)
- {
- pthread_mutex_unlock (&cap->lock);
- return 0;
- }
-
- /* This was the last send reference we held. Deallocate the server
- capability. This is not so easy, though, as some other thread
- might concurrently try to enter a new reference for this
- capability. Instead of doing reference counting for the
- capability IDs in the server connection, we get a temporary
- reference and acquire the server connection lock while the
- capability is temporarily unlocked. Then we can check if we
-     still have to deallocate the capability.  */
- sconn = cap->sconn;
-
- cap->srefs = 1;
- pthread_mutex_unlock (&cap->lock);
-
-  /* Now we can try to remove ourselves from the server capability
- list. CAP->sconn will not change while we hold our
- reference. */
- pthread_mutex_lock (&sconn->lock);
- pthread_mutex_lock (&cap->lock);
-
- assert (cap->sconn == sconn);
- assert (cap->srefs != 0);
- cap->srefs--;
- if (cap->srefs != 0)
- {
- /* Someone else came in and got a reference to the almost dead
- send capability. Give up. */
- pthread_mutex_unlock (&cap->lock);
- pthread_mutex_unlock (&sconn->lock);
- return 0;
- }
-
- /* The capability can now finally be removed. */
- _hurd_cap_sconn_remove (sconn, cap->scid);
-
- if (cap->orefs == 0)
- {
- /* Return the capability to the pool. */
- pthread_mutex_unlock (&cap->lock);
- hurd_slab_dealloc (cap_space, (void *) cap);
- }
- else
- pthread_mutex_unlock (&cap->lock);
-
- return 0;
-}
-
-
-/* Modify the number of object references for the capability CAP by
- DELTA. */
-error_t
-hurd_cap_obj_mod_refs (hurd_cap_t cap, int delta)
-{
- hurd_cap_ulist_t ulist;
-
- pthread_mutex_lock (&cap->lock);
-
- /* Verify that CAP->orefs is not 0 and will not become negative. */
- if (cap->orefs == 0 || (delta < 0 && cap->orefs < -delta))
- {
- pthread_mutex_unlock (&cap->lock);
- return EINVAL;
- }
-
- /* Verify that CAP->orefs will not overflow. */
- if (delta > 0 && cap->orefs > UINT32_MAX - delta)
- {
- pthread_mutex_unlock (&cap->lock);
- return EOVERFLOW;
- }
-
- cap->orefs += delta;
- if (cap->orefs != 0)
- {
- pthread_mutex_unlock (&cap->lock);
- return 0;
- }
-
- /* The object is going to be destroyed. Remove the capability from
- each user. This is not so easy, though, as some other thread
- might concurrently try to enter a new reference for this
- capability (for example, because of an incoming RPC). Instead of
- doing reference counting for the capability in each user list, we
- get a temporary reference and acquire the user list lock while
- the capability is temporarily unlocked. Then we can check if we
-     still have to deallocate the capability.  */
- ulist = cap->ouser;
-
- cap->orefs = 1;
- pthread_mutex_unlock (&cap->lock);
-
-  /* Now we can try to remove ourselves from the user lists.
-     CAP->ouser will not change while we hold our reference.  */
- pthread_mutex_lock (&ulist->lock);
- pthread_mutex_lock (&cap->lock);
-
- assert (cap->ouser == ulist);
- assert (cap->orefs != 0);
- cap->orefs--;
- if (cap->orefs != 0)
- {
- /* Someone else came in and got a reference to the almost dead
- capability object. Give up. */
- pthread_mutex_unlock (&cap->lock);
- pthread_mutex_unlock (&ulist->lock);
- return 0;
- }
-
- /* The capability object can now finally be removed. */
- _hurd_cap_ulist_remove (ulist, cap);
- pthread_mutex_unlock (&ulist->lock);
-
- if (cap->srefs == 0)
- {
- /* Return the capability to the pool. */
- pthread_mutex_unlock (&cap->lock);
- hurd_slab_dealloc (cap_space, (void *) cap);
- }
- else
- pthread_mutex_unlock (&cap->lock);
-
- return 0;
-
-}
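
Both reference-dropping paths above rely on the same trick: when the count is about to reach zero but an outer lock has to be taken first, park a temporary reference, drop the object lock, reacquire the locks in the right order, and re-check.  A generic sketch of that pattern with made-up owner/object types:

    #include <pthread.h>
    #include <stdlib.h>

    struct owner
    {
      pthread_mutex_t lock;
      /* ... the owner's index of objects ... */
    };

    struct object
    {
      pthread_mutex_t lock;
      unsigned int refs;
      struct owner *owner;
    };

    /* Placeholder for this sketch; a real owner would drop OBJ from
       its index here.  Both OWNER and OBJ are locked.  */
    static void
    owner_remove (struct owner *owner, struct object *obj)
    {
      (void) owner;
      (void) obj;
    }

    static void
    object_unref (struct object *obj)
    {
      struct owner *owner;

      pthread_mutex_lock (&obj->lock);
      if (--obj->refs > 0)
        {
          pthread_mutex_unlock (&obj->lock);
          return;
        }

      /* The lock order is owner first, then object, so park a
         temporary reference and reacquire the locks in order.  */
      owner = obj->owner;
      obj->refs = 1;
      pthread_mutex_unlock (&obj->lock);

      pthread_mutex_lock (&owner->lock);
      pthread_mutex_lock (&obj->lock);
      if (--obj->refs > 0)
        {
          /* Someone acquired a reference in the window; give up.  */
          pthread_mutex_unlock (&obj->lock);
          pthread_mutex_unlock (&owner->lock);
          return;
        }

      owner_remove (owner, obj);
      pthread_mutex_unlock (&owner->lock);
      pthread_mutex_unlock (&obj->lock);
      free (obj);
    }
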
diff --git a/libhurd-cap/cap.h b/libhurd-cap/cap.h
deleted file mode 100644
index 7d2b012..0000000
--- a/libhurd-cap/cap.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann <marcus@gnu.org>
-
- This file is part of the GNU Hurd.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <pthread.h>
-#include <errno.h>
-
-#include <hurd/ihash.h>
-#include <hurd/slab.h>
-#include <l4/types.h>
-
-typedef l4_word_t hurd_task_info_t;
-
-/* Initialize the capability system. */
-error_t hurd_cap_init (void);
-
-
-/* Capabilities provided by other servers. */
-struct hurd_cap_sconn
-{
- /* The server thread to which messages should be sent. */
- l4_thread_id_t server_thread;
-
-  /* A reference for the server's task ID to prevent reuse.  This is 0
- if this is the connection to the task server itself. */
- hurd_task_info_t server_task_info;
-
- /* The lock protecting the variable members of the server connection
- object. */
- pthread_mutex_t lock;
-
- /* The number of references to this server connection object. */
- unsigned int refs;
-
- /* A hash mapping the capability IDs to capability objects. */
- struct hurd_ihash id_to_cap;
-};
-typedef struct hurd_cap_sconn *hurd_cap_sconn_t;
-
-
-/* User capabilities. */
-
-/* The task-specific ID for this capability. */
-typedef l4_word_t hurd_cap_scid_t;
-
-
-/* Forward reference. */
-struct hurd_cap_ulist;
-typedef struct hurd_cap_ulist *hurd_cap_ulist_t;
-
-
-/* The capability structure. */
-struct hurd_cap
-{
- /* The lock protecting the capability. This lock protects all the
- members in the capability structure. */
- pthread_mutex_t lock;
-
-
- /* Information for send capabilities. */
-
- /* The number of send references for this capability. If this is 0,
- then this capability can not be used to send messages to the
- server providing the capability. */
- unsigned int srefs;
-
- /* The server connection for this capability. If this is NULL, then
- the capability is dead. */
- hurd_cap_sconn_t sconn;
-
- /* The task-specific ID for this capability. Only valid if SCONN is
- not NULL. */
- hurd_cap_scid_t scid;
-
- /* A callback for the user of the capability, invoked when the
- capability is destroyed. */
-#if 0
- hurd_cap_dead_t dead_cb;
-#endif
-
- /* Information for local capabilities. */
-
- /* The number of object references for this capability. If this is
-     0, then this capability is not implemented locally.  */
- unsigned int orefs;
-
- /* The object that is behind this capability. */
- void *odata;
-
- /* A list of remote users. */
- hurd_cap_ulist_t ouser;
-
- /* A callback invoked when the capability is destroyed. */
-#if 0
- hurd_cap_odead_cb_t odead_cb;
-#endif
-
- /* A callback to be invoked when the capability has no more
- senders. */
-#if 0
- hurd_cap_no_sender_cb_t no_sender_cb;
-#endif
-};
-typedef struct hurd_cap *hurd_cap_t;
-
-
-struct hurd_cap_ulist
-{
- /* The lock protecting the variable members of the object. */
- pthread_mutex_t lock;
-};
-
-
-/* Remove the entry for the capability CAP from the user list ULIST.
- ULIST (and the capability CAP) are locked. */
-void _hurd_cap_ulist_remove (hurd_cap_ulist_t ulist, hurd_cap_t cap);
diff --git a/libhurd-cap/headers.m4 b/libhurd-cap/headers.m4
deleted file mode 100644
index 50f378a..0000000
--- a/libhurd-cap/headers.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-# headers.m4 - Autoconf snippets to install links for header files.
-# Copyright 2003 Free Software Foundation, Inc.
-# Written by Marcus Brinkmann <marcus@gnu.org>.
-#
-# This file is free software; as a special exception the author gives
-# unlimited permission to copy and/or distribute it, with or without
-# modifications, as long as this notice is preserved.
-#
-# This file is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-AC_CONFIG_LINKS([include/hurd/cap.h:libhurd-cap/cap.h])
diff --git a/physmem/ChangeLog b/physmem/ChangeLog
deleted file mode 100644
index 8967ebf..0000000
--- a/physmem/ChangeLog
+++ /dev/null
@@ -1,403 +0,0 @@
-2005-06-24 Neal H. Walfield <neal@gnu.org>
-
- * physmem.c (main): Call frame_entry_init and frame_init before
- calling create_bootstrap_caps.
- Reported by Matthieu Lemerre <racin@free.fr>.
-
-2005-06-22 Neal H. Walfield <neal@gnu.org>
-
- * priv.h (frame_entry_init): Declare.
- (frame_init): Declare.
- * physmem.c (main): Call frame_entry_init and frame_init.
- * frame-entry.c: Create a frame_entry slab class. Don't use
- HURD_SLAB_SPACE_INITIALIZER to initialize frame_entry_space. Do
- it ...
- (frame_entry_init): ... here with hurd_frame_entry_slab_init.
- (frame_entry_constructor): Update argument type given stronger
- type checking.
- (frame_entry_alloc): Call hurd_frame_entry_slab_alloc, not
- hurd_slab_alloc.
- (frame_entry_free): Call hurd_frame_entry_slab_dealloc, not
- hurd_slab_dealloc.
- * frame.c: Create a frame slab class. Don't use
- HURD_SLAB_SPACE_INITIALIZER to initialize frame_space. Do it ...
- (frame_init): ... here with hurd_frame_slab_init.
- (frame_constructor): Update argument type given stronger type
- checking.
- (frame_alloc): Call hurd_frame_slab_alloc, not hurd_slab_alloc.
- (frame_free): Call hurd_frame_slab_dealloc, not hurd_slab_dealloc.
-
-2005-04-06 Neal H. Walfield <neal@gnu.org>
-
- * physmem.h: Move from here...
- * priv.h: ...to here. Improve
- comments.
- (extract_access): New function.
- (struct frame): Add lock field. Change type of may_be_mapped
- field from bool to l4_word_t.
- (struct frame_entry): Add container, shared_next and shared_prevp
- fields.
- (frame_entry_free): Renamed from frame_entry_dealloc. Update
- callers.
- (frame_entry_create): Renamed from frame_entry_new. Update
- callers.
- (frame_entry_use): Renamed from frame_entry_use_frame. Update
- callers.
- (frame_entry_copy): New declaration.
- (frame_entry_destroy): Renamed from frame_entry_drop. Update
- callers.
- (frame_entry_map): New declaration.
- (frame_entry_deallocate): New declaration.
- (frame_memory_bind): Assert that FRAME->LOCK is locked.
- (frame_release): New definition.
- (container_attach): Renamed from frame_entry_attach. Update
- callers.
- (container_detach): Renamed from frame_entry_detach. Update
- callers.
- * frame.c: Don't include "physmem.h". Include "priv.h" and
- <pthread.h>.
- (frame_dump): New function.
- (frame_constructor): Initialize FRAME->LOCK and lock it.
- (frame_space): It is a space of frame structures, not frame_entry
- structures. Fix typo.
- (frame_alloc): Initialize FRAME_MAY_BE_MAPPED. Assert FRAME->LOCK
- is locked.
- (frame_deref): When unmapping a frame, assert that FRAME->MEMORY
- points to memory. Unlock FRAME->LOCK on return. Add sanity
- checks.
- (frame_add_user): Assert that FRAME->LOCK is locked.
- (frame_drop_user): Assert that FRAME->LOCK is locked. Don't drop
- a reference.
- * frame-entry.c: Include <string.h> and "zalloc.h".
- (frame_entry_constructor): New function.
- (frame_entry_space): Use it.
- (frame_entry_dump): Don't mark as static. Improve output. Add
- some sanity checks.
- (frame_entry_alloc): Assert that FRAME_ENTRY->SHARED_NEXT and
- FRAME_ENTRY->SHARED_PREVP are correct.
- (frame_entry_free): Add sanity checks.
- [!NDEBUG]: Clear memory.
- (frame_entry_share_with): New function.
- (frame_entry_create): Assert CONT->LOCK is held. Initialize
- FRAME_ENTRY->CONTAINER. Initialize share list.
- (frame_entry_copy): New function.
- (frame_entry_use): Assert CONT->LOCK and SOURCE->LOCK are held.
- Set FRAME_ENTRY->CONTAINER. Initialize share list.
- (frame_entry_destroy): If CONT is NULL, don't try to detach
- FRAME_ENTRY from a container. Assert CONT->LOCK (if CONT is
- non-NULL) and SOURCE->LOCK are held. Correctly update COW count.
- Only unlock FRAME_ENTRY->LOCK if DO_UNLOCK is true.
- (frame_entry_attach): Moved to container.c and renamed to
- container_attach.
- (frame_entry_detach): Moved to container.c and renamed to
- container_detach.
- (frame_entry_map): New function.
- (frame_entry_deallocate): New function.
- * container.c: Include <string.h> and "priv.h".
- (container_dump): New function.
- (container_attach): Moved from frame-entry.c and renamed from
- frame_entry_attach.
- (container_detach): Moved from frame-entry.c and renamed from
- frame_entry_detach.
- (container_allocate): Lock CONT->LOCK. Don't check the return
- value of FRAME_ENTRY_ALLOC for errors: it is guaranteed to
- succeed. Unlock FE->FRAME->LOCK and CONT->LOCK.
- (container_deallocate): Lock CONT->LOCK. Refactor iteration.
- Rewrite body to use frame_entry_deallocate. Return the number of
- bytes allocated.
- (container_map): Refactor iteration. Rewrite body to use
- frame_entry_map.
- (container_copy): If HURD_PM_CONT_ALL_OR_NONE is set, try harder to
- make sure the operation won't fail mid-flight. Lock
- SFE->FRAME->LOCK. If the copy is a copy on write and the frame
- has extant write mappings, remove them.
- (container_ops): Moved to physmem.h. Preface names with hurd_pm_
- to reflect presence in the global name space.
- (container_demuxer): If the invoke method returns an
- error code, emit a debugging message.
- (container_alloc) [!NDEBUG]: Lock CONT->LOCK.
- Unlock FE->FRAME->LOCK.
- (container_init): New function.
- (container_reinit): Assert CONT->LOCK is held.
- (container_class_init): Use CONTAINER_INIT.
-
- * physmem.h: New file.
- * headers.m4: New file.
- * physmem.c: Don't include "physmem.h". Include "priv.h".
- * ia32-cmain.c: Likewise.
-
-2005-03-09 Neal H. Walfield <neal@gnu.org>
-
- * container.c (container_allocate): Set err to ENOMEM when
- frame_entry_alloc fails.
-
-2005-03-09 Neal H. Walfield <neal@gnu.org>
-
- * container.c (container_map): Update argument marshalling.
- Sanity check SIZE and VADDR.
- (container_copy): Strengthen the argument checking.
-
-2005-03-08 Neal H. Walfield <neal@gnu.org>
-
- * container.c (container_create): Lock OBJ before calling
- hurd_cap_obj_drop.
- Reported by Matthieu Lemerre <racin@free.fr>.
-
-2005-03-08 Neal H. Walfield <neal@gnu.org>
-
- * physmem.h (struct frame): Add field cow.
- (frame_entry_use_frame): Rename from frame_entry_use. Change
- interface such that FRAME must refer to a valid frame. Update
- users.
- (frame_add_user): Rename from frame_add. Update users.
- (frame_drop_user): Rename from frame_drop. Update users.
- * container.c (container_map): Take FRAME_ENTRY->FRAME_OFFSET
- into account when determining the physical memory.
- (container_copy): New function.
- (enum container_ops): Add container_copy_id.
- (container_demuxer): Recognize container_copy_id.
- * frame-entry.c (frame_entry_new): Allocate FRAME_ENTRY->frame.
- Add FRAME_ENTRY as a user of the allocated frame. Update callers.
- (frame_entry_use_frame): Add FRAME_ENTRY as a user of FRAME.
- (frame_entry_drop): Drop FRAME_ENTRY as a user of the underlying
- frame.
- * frame.c (frame_constructor): Initialize FRAME->cow.
- (frame_alloc): Add an assert.
-
-2005-01-11 Neal H. Walfield <neal@gnu.org>
-
- * Makefile.am (physmem_SOURCES): Add frame-entry.c and frame.c
- (physmem_LDADD): Add ../libhurd-btree/libhurd-btree.a.
- * frame-entry.c: New file.
- * frame.c: New file.
- * README: Rewrite.
- * container.c: Likewise.
- * physmem.h: Likewise.
- * physmem.c (create_bootstrap_caps): Change container_t to struct
- container *.
-
-2005-01-07 Neal H. Walfield <neal@gnu.org>
-
- * output.h (debug): Preface __VA_ARGS__ with ## thereby making it
- optional.
-
-2004-11-17 Neal H. Walfield <neal@gnu.org>
-
- * Makefile.am (bootdir): New variable.
- (boot_PROGRAMS): Use this instead of noinst_PROGRAMS.
-
-2004-11-17 Neal H. Walfield <neal@gnu.org>
-
- * output.h (debug): Include program_name and __FUNCTION__ in
- output.
- * physmem.c (create_bootstrap_caps): First argument to
- debug must be a constant format string.
-
-2004-11-01 Marcus Brinkmann <marcus@gnu.org>
-
- * container.c (struct container, container_t): Remove member OBJ.
- Move struct and typedef to ...
- * physmem.h (struct container, container_t): ... here.
- (container_alloc): Change type of last argument in prototype to a
- pointer to a container_t.
- * container.c (container_reinit, container_map): Use
- hurd_cap_obj_to_user instead of a cast.
- (container_class_init): Provide type instead of size and alignment.
- (container_alloc): Add new variable OBJ and use hurd_cap_obj_to_user.
- Change type of last argument to a pointer to container_t.
- * physmem.c (create_bootstrap_caps): New variable CONTAINER.
- Use hurd_cap_obj_from_user to get at the object.
-
- * container.c: Include "zalloc.h".
-
- * physmem.c (create_bootstrap_caps): Remove unused variables
- STARTUP_CAP, NR_FPAGES, FPAGES.
-
-2004-10-29 Marcus Brinkmann <marcus@gnu.org>
-
- * ia32-cmain.c (switch_thread): Correct start of small sub stack
- address. Reported by Rian Hunter <hurd@thelaststop.net>.
-
-2004-10-28 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c (get_task_cap): Removed.
- (bootstrap_final): New function.
- (main): Call bootstrap_final, not get_task_cap.
-
-2004-10-27 Marcus Brinkmann <marcus@gnu.org>
-
- * container.c (container_map): Call l4_fpage_xspan instead of
- l4_fpage_span.
-
-2004-10-25 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c (physmem_server): Call hurd_cap_bucket_worker_alloc.
-
-2004-10-20 Marcus Brinkmann <marcus@gnu.org>
-
- * malloc-wrap.c: Do not include zalloc.h here.
- (USE_MALLOC_LOCK): Define.
-
-2004-07-16 Bas Wijnen <b.wijnen@phys.rug.nl>
-
- * physmem.c (physmem_server): Added missing parameter.
-
-2004-04-26 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c: Include <hurd/wortel.h>.
- (wortel_thread_id, wortel_cap_id): New variables.
- (get_all_memory): Rewritten using wortel interface.
- (create_bootstrap_caps): Likewise.
- (get_threads): Likewise.
- (get_task_cap): Likewise.
- (main): Initialize wortel_thread_id.
- * output.c (shutdown): Include <hurd/wortel.h>.
- (shutdown): Rewritten using wortel interface.
- (putchar): Rewritten using wortel interface.
-
-2004-04-15 Marcus Brinkmann <marcus@gnu.org>
-
- * container.c (container_map): New function.
- (container_demux): Call container_map.
-
-2004-04-11 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c (get_task_cap): New function.
- (main): Call get_task_cap.
-
- * container.c (container_reinit): Unmap the fpages before
- returning them to the pool.
-
-2004-04-10 Marcus Brinkmann <marcus@gnu.org>
-
- * container.c: Include <l4/space.h>. Use L4_FPAGE_SPAN_MAX
- instead of MAX_FPAGES everywhere.
- (container_reinit): New function.
- (container_class_init): Pass container_reinit as reinit function.
- (MAX_FPAGES): Removed.
-
-2004-04-09 Marcus Brinkmann <marcus@gnu.org>
-
- * container.c: Revert last change.
- * physmem.h: Revert last change.
- * physmem.c (MAX_FPAGES): Removed macro.
- (create_bootstrap_caps): Use L4_FPAGE_SPAN_MAX, not MAX_FPAGES.
- Update bootstrap cap code to new wortel interface (essentially
- reverting the last change).
-
- * physmem.h: Include <stdbool.h>.
- * container.c (struct container): Add member mapped.
- (container_alloc): Add new argument mapped. Initialize
- CONTAINER->mapped.
-
- * Makefile.am (physmem_LDADD): Add
- ../libhurd-cap-server/libhurd-cap-server.a and
- ../libhurd-slab/libhurd-slab.a.
- (physmem_SOURCES): Add container.c.
- * container.c: New file.
- * physmem.h: Include <hurd/cap-server.h> and <errno.h>.
- (container_class_init, container_alloc): New prototypes.
- * physmem.c (create_bootstrap_caps): Take new argument bucket.
- Create container capabilities and inject them into the bucket.
- (getpagesize): New temporary helper function.
- (physmem_server): New function.
- (main): New variable BUCKET. Create capability bucket. Pass
- BUCKET to create_bootstrap_caps. New variable MANAGER. Create
- manager thread.
-
-2004-04-06 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c (setup_threads): Add threads to pool after creating
- main thread. Don't overwrite server_thread with an incorrect
- value at the end.
-
-2004-03-19 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c (WORTEL_MSG_GET_THREADS): New macro.
- (setup_threads): Request number of extra threads from wortel. Add
- them to the pool.
-
- * output.h (shutdown): Add noreturn attribute to prototype.
- * output.c (shutdown): Add noreturn attribute. Sleep forever.
-
- * config.m4 (HURD_LOAD_ADDRESS): Change load address to 0x400000.
- * physmem.c: Include pthread.h.
- (setup_threads): New function.
- (main): New variable SERVER_THREAD. Call setup_threads.
- (exit, abort): New functions.
- * physmem.h (switch_thread): Add prototype.
- * ia32-cmain.c (switch_thread): New function.
- (__thread_stack_pointer): New macro.
- (__thread_set_stack_pointer): New macro.
- * Makefile.am (physmem_LDADD): Add ../libpthread/libpthread.a and
- ../libhurd-ihash/libhurd-ihash.a.
-
-2004-03-16 Marcus Brinkmann <marcus@gnu.org>
-
- * output.c (putchar): Replace all &msg with msg. Update
- everything to new API.
- * physmem.c: Likewise.
-
-2003-10-26 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (physmem_CPPFLAGS): Use top_builddir for include
- directory.
-
-2003-10-24 Daniel Wagner <wagi@gmx.ch>
-
- * Makefile.am (physmem_CPPFLAGS): Use top_srcdir instead of
- top_builddir.
-
-2003-10-16 Marcus Brinkmann <marcus@gnu.org>
-
- * output.c (shutdown): New function.
- * physmem.c: Include <stdlib.h>.
- (get_all_memory): Update to match new wortel interface.
- (create_bootstrap_caps): New function.
- (main): Call create_bootstrap_caps.
-
-2003-10-12 Marcus Brinkmann <marcus@gnu.org>
-
- * config.m4: New file.
- * Makefile.am (physmem_LDFLAGS): Replace load address with
- @HURD_PHYSMEM_LOAD_ADDRESS@.
-
-2003-09-24 Marcus Brinkmann <marcus@gnu.org>
-
- * mmap.c: New file.
- * Makefile.am (physmem_SOURCES): Add mmap.c.
- * malloc-wrap.c (LACKS_SYS_MMAN_H, mmap, munmap, PROT_READ,
- PROT_WRITE, MAP_PRIVATE, MAP_ANONYMOUS, MUNMAP_FAILURE): Remove
- macros.
-
-2003-09-22 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (AM_CPPFLAGS, physmem_CFLAGS): Removed.
- (physmem_CPPFLAGS): New variable.
- (physmem_SOURCES): Add malloc-wrap.c.
- (EXTRA_physmem_SOURCES): New variable.
- * malloc.c, malloc-wrap.c: New files.
-
-2003-09-21 Marco Gerards <metgerards@student.han.nl>
-
- * Makefile.am (AM_CPPFLAGS): New variable.
-
-2003-09-19 Marcus Brinkmann <marcus@gnu.org>
-
- * physmem.c: Include "zalloc.h".
- * zalloc.c: Include <string.h>, not <strings.h>.
- (add_block): Add missing return type.
- (zalloc_dump_zones): Use %p for pointer.
-
- * zalloc.c (L4_MIN_PAGE_SHIFT): Remove macro.
- (ZONE_SIZE): Use L4_MIN_PAGE_SIZE_LOG2 instead of L4_MIN_PAGE_SHIFT.
- (ZONES): Likewise.
- (zfree): Likewise.
- (zalloc): Likewise.
-
- * physmem/Makefile.am (physmem_SOURCES): Remove getpagesize.c.
- * getpagesize.c: File removed.
- * physmem/zalloc.c (zfree): Use l4_min_page_size() instead of
- getpagesize().
- (zalloc): Likewise.
- (zalloc_dump_zones): Likewise.
diff --git a/physmem/Makefile.am b/physmem/Makefile.am
deleted file mode 100644
index d3eb081..0000000
--- a/physmem/Makefile.am
+++ /dev/null
@@ -1,47 +0,0 @@
-# Makefile.am - Makefile template for the physical memory server.
-# Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
-# Written by Marcus Brinkmann.
-#
-# This file is part of the GNU Hurd.
-#
-# The GNU Hurd is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# The GNU Hurd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
-
-if ARCH_IA32
- ARCH_SOURCES = ia32-crt0.S ia32-cmain.c
-endif
-
-bootdir = $(prefix)/boot
-boot_PROGRAMS = physmem
-
-physmem_CPPFLAGS = -I$(top_builddir)/include \
- -I$(top_srcdir)/libc-parts $(AM_CPPFLAGS)
-
-physmem_SOURCES = $(ARCH_SOURCES) \
- output.h output.c \
- zalloc.h zalloc.c mmap.c malloc-wrap.c \
- physmem.h physmem.c container.c frame-entry.c frame.c
-
-# Doug Lea's malloc is included by malloc-wrap.c.
-EXTRA_physmem_SOURCES = malloc.c
-
-physmem_LDFLAGS = -u_start -e_start -N -nostdlib \
- -Ttext=@HURD_PHYSMEM_LOAD_ADDRESS@
-
-physmem_LDADD = ../libhurd-cap-server/libhurd-cap-server.a \
- ../libhurd-slab/libhurd-slab.a \
- ../libpthread/libpthread.a \
- ../libhurd-ihash/libhurd-ihash.a \
- ../libhurd-btree/libhurd-btree.a \
- ../libc-parts/libc-parts.a -lgcc
diff --git a/physmem/README b/physmem/README
deleted file mode 100644
index c9be1be..0000000
--- a/physmem/README
+++ /dev/null
@@ -1,89 +0,0 @@
-Introduction
-============
-
-physmem is the physical memory manager. It has nothing to do with
-virtual memory management; that is the domain of the applications
-themselves.
-
-The physical memory server provides three different resources: memory
-control capabilities, containers and frames.
-
-Memory Control Ports
---------------------
-
-Memory control capabilities hold the right to guarantee a number of
-frames. A memory control capability may be split. If a memory
-control capability, A, holds the right to 100 frames and a new memory
-control capability, B, is split from it to hold 10 frames, after the
-operation, A guarantees 90 frames and B 10.
-
-In this scenario, B is considered the child of A. If B is destroyed,
-it (as well as any memory control ports which have been split from it)
-is absorbed back into A, and any containers and frames allocated using
-it are implicitly deallocated.
-
-When a task is started, the starter may split its memory control
-capability and give a copy of the new capability to the new task.
-Alternatively, two tasks may share a memory control capability.
-
-In the former scenario, when the starter wants to terminate the child,
-it may reclaim the frames by destroying the memory control capability.
-
-Containers
-----------
-
-physmem will allocate a container given a memory control capability.
-When a frame is allocated into the container, the memory control
-capability from which it was created is charged.
-
-A container represents a name space. Valid container names (integers)
-refer to bytes in a frame. The contents of a container are not directly
-accessible to tasks. A task must first map the contents of a
-container into its virtual address space.
-
-Containers are used to share memory with other processes, for
-instance between a filesystem and its clients. The client creates
-a container with a number of pages and sends it to the filesystem,
-which reads the data from backing store into the memory. Since the
-client may not be able to trust the server not to steal the physical
-frames, the client must not give the container capability to it.
-Hence, a weaker capability is provided which grants the server
-limited access to the container.
-
-Frames
-------
-
-Frames are allocated in containers at particular addresses. A client
-may allocate any number of base-page-sized frames at once (assuming
-that there is enough credit in the memory control capability).
-Internally, this range is immediately converted to power-of-2 sized
-frames.
-
-Once allocated, a task may request a map of a frame from the physical
-memory server. Maps are not guaranteed to be persistent: physmem may
-rearrange physical memory to defragment it or to clear space in a
-special zone (for instance, for DMA). Frames may, however, be pinned
-in place for a limited amount of time.
-
-Since frames are multiplexed as well as shared across multiple tasks,
-it is useful to reallocate frames in place. In this way, data
-structures are not torn down just to be immediately recreated, and
-gratuitous COWs are not performed. container_release disassociates
-the frames in a region from any shared frames.
-
-When a frame is deallocated, physmem may choose not to immediately
-unmap it from the client where it is safe to do so (i.e. where the
-delay neither leaks information nor gives a task access which it
-should not have). This is useful in the case of highly shared
-read-only memory, e.g. shared libraries.
-
-Data Structures
-===============
-
-Given a container capability, a `struct container' can be derived from
-it. A container contains a btree of frame entries keyed by their
-start index and size.
-
-A frame entry points to a `struct frame'. Exactly one frame entry
-exists for each mapping of a frame. Hence, the frame entry is
-per-mapping state and a frame is per-frame state. Frames are
-reference counted.
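
The data structures described above can be sketched roughly as follows
(illustrative only: the real definitions lived in priv.h, and the
frame entries were kept in a libhurd-btree tree keyed by region rather
than the plain list shown here).

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct region
{
  uintptr_t start;              /* First container address covered.  */
  size_t size;                  /* Length in bytes.  */
};

/* Per-frame state, shared by every frame entry that maps it.  */
struct frame
{
  pthread_mutex_t lock;
  unsigned int refs;            /* One reference per frame entry.  */
  /* ... physical memory descriptor, COW count, user list ...  */
};

/* Per-mapping state: exactly one frame entry exists for each mapping
   of a frame into a container.  */
struct frame_entry
{
  struct region region;         /* Names within the container.  */
  size_t frame_offset;          /* Offset into the underlying frame.  */
  struct frame *frame;
  struct frame_entry *next;     /* Simplified sibling link.  */
};

struct container
{
  pthread_mutex_t lock;
  struct frame_entry *frame_entries;  /* Keyed by region in reality.  */
};
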
diff --git a/physmem/config.m4 b/physmem/config.m4
deleted file mode 100644
index 9cbcd1f..0000000
--- a/physmem/config.m4
+++ /dev/null
@@ -1,21 +0,0 @@
-# config.m4 - Configure snippet for physmem.
-# Copyright (C) 2003 Free Software Foundation, Inc.
-# Written by Maurizio Boriani.
-#
-# This file is part of the GNU Hurd.
-#
-# The GNU Hurd is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2, or (at
-# your option) any later version.
-#
-# The GNU Hurd is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
-
-HURD_LOAD_ADDRESS(physmem, 0x400000)
diff --git a/physmem/container.c b/physmem/container.c
deleted file mode 100644
index 76a8d01..0000000
--- a/physmem/container.c
+++ /dev/null
@@ -1,718 +0,0 @@
-/* container.c - container class for physical memory server.
- Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
- Written by Neal H. Walfield <neal@gnu.org>.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with the GNU Hurd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <pthread.h>
-#include <compiler.h>
-#include <l4.h>
-
-#include <hurd/cap-server.h>
-#include <hurd/btree.h>
-
-#include "priv.h"
-#include "physmem.h"
-#include "zalloc.h"
-
-#include "output.h"
-
-
-static struct hurd_cap_class container_class;
-
-static inline void
-container_dump (struct container *cont)
-{
- struct frame_entry *fe;
-
- printf ("Container %x: ", cont);
- for (fe = hurd_btree_frame_entry_first (&cont->frame_entries); fe;
- fe = hurd_btree_frame_entry_next (fe))
- printf ("fe:%x %x+%x@%x on %x:%x+%x ",
- fe, fe->region.start, fe->region.size, fe->frame_offset,
- fe->frame, l4_address (fe->frame->memory),
- l4_size (fe->frame->memory));
- printf ("\n");
-}
-
-error_t
-container_attach (struct container *cont, struct frame_entry *frame_entry)
-{
- error_t err;
-
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
-
- err = hurd_btree_frame_entry_insert (&cont->frame_entries, frame_entry);
- if (! err)
- frame_entry->container = cont;
-
- return err;
-}
-
-void
-container_detach (struct container *cont, struct frame_entry *frame_entry)
-{
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
- assert (hurd_btree_frame_entry_find (&cont->frame_entries,
- &frame_entry->region));
- assert (cont == frame_entry->container);
-
- hurd_btree_frame_entry_detach (&cont->frame_entries, frame_entry);
-}
-
-/* CTX->obj should be a memory control object, not a container. */
-static error_t
-container_create (hurd_cap_rpc_context_t ctx)
-{
- error_t err;
- hurd_cap_obj_t obj;
- hurd_cap_handle_t handle;
-
- l4_msg_clear (ctx->msg);
-
- err = hurd_cap_class_alloc (&container_class, &obj);
- if (err)
- return err;
- hurd_cap_obj_unlock (obj);
-
- err = hurd_cap_bucket_inject (ctx->bucket, obj, ctx->sender, &handle);
- if (err)
- {
- hurd_cap_obj_lock (obj);
- hurd_cap_obj_drop (obj);
- return err;
- }
-
- /* The reply message consists of a single word, a capability handle
- which the client can use to refer to the container. */
-
- l4_msg_append_word (ctx->msg, handle);
-
- return 0;
-}
-
-static error_t
-container_share (hurd_cap_rpc_context_t ctx)
-{
- return EOPNOTSUPP;
-}
-
-static error_t
-container_allocate (hurd_cap_rpc_context_t ctx)
-{
- error_t err;
- struct container *cont = hurd_cap_obj_to_user (struct container *, ctx->obj);
- l4_word_t flags = l4_msg_word (ctx->msg, 1);
- uintptr_t start = l4_msg_word (ctx->msg, 2);
- size_t size = l4_msg_word (ctx->msg, 3);
- size_t amount;
- int i;
-
- /* We require three arguments (in addition to the cap id): the
- flags, the start and the size. */
- if (l4_untyped_words (l4_msg_msg_tag (ctx->msg)) != 4)
- {
- debug ("incorrect number of arguments passed. require 4 but got %d\n",
- l4_untyped_words (l4_msg_msg_tag (ctx->msg)));
- l4_msg_clear (ctx->msg);
- return EINVAL;
- }
-
- /* Allocate the memory. */
- l4_fpage_t fpages[L4_FPAGE_SPAN_MAX];
- int nr_fpages = l4_fpage_span (0, size - 1, fpages);
-
- pthread_mutex_lock (&cont->lock);
-
- for (err = 0, amount = 0, i = 0; i < nr_fpages; i ++)
- {
- /* FIXME: Check to make sure that the memory control object that
- this container refers to has enough memory to do each
- allocation. */
- struct frame_entry *fe = frame_entry_alloc ();
- assert (fe);
-
- err = frame_entry_create (cont, fe, start + l4_address (fpages[i]),
- l4_size (fpages[i]));
- if (err)
- {
- frame_entry_free (fe);
- break;
- }
-
- amount += l4_size (fpages[i]);
-
- /* XXX: Use the flags.
- frame->flags = flags; */
-
- pthread_mutex_unlock (&fe->frame->lock);
- }
-
- pthread_mutex_unlock (&cont->lock);
-
- l4_msg_clear (ctx->msg);
- l4_msg_append_word (ctx->msg, amount);
-
- return err;
-}
-
-static error_t
-container_deallocate (hurd_cap_rpc_context_t ctx)
-{
- error_t err = 0;
- struct container *cont = hurd_cap_obj_to_user (struct container *, ctx->obj);
- uintptr_t start = l4_msg_word (ctx->msg, 1);
- const size_t size = l4_msg_word (ctx->msg, 2);
- size_t remaining = size;
-
- /* We require two arguments (in addition to the cap id): the start
- and the size. */
- if (l4_untyped_words (l4_msg_msg_tag (ctx->msg)) != 3)
- {
- debug ("incorrect number of arguments passed. require 3 but got %d\n",
- l4_untyped_words (l4_msg_msg_tag (ctx->msg)));
- l4_msg_clear (ctx->msg);
- return EINVAL;
- }
-
- l4_msg_clear (ctx->msg);
-
- pthread_mutex_lock (&cont->lock);
-
- if ((size & (L4_MIN_PAGE_SIZE - 1)) != 0)
- {
- err = EINVAL;
- goto out;
- }
-
- struct frame_entry *next = frame_entry_find (cont, start, 1);
- if (! next)
- goto out;
-
- if (((start - next->region.start) & (L4_MIN_PAGE_SIZE - 1)) != 0)
- {
- err = EINVAL;
- goto out;
- }
-
- while (next && remaining > 0)
- {
- struct frame_entry *fe = next;
-
- /* We must get the region after FE before we potentially
- deallocate FE. */
- if (fe->region.start + fe->region.size < start + remaining)
- /* The region to deallocate extends beyond FE. */
- {
- next = hurd_btree_frame_entry_next (fe);
- if (next && fe->region.start + fe->region.size != next->region.start)
- /* NEXT does not immediately follow FE. */
- next = 0;
- }
- else
- /* FE is the last frame entry to process. */
- next = 0;
-
- /* The number of bytes to deallocate in this frame entry. */
- size_t length = fe->region.size - (start - fe->region.start);
- if (length > remaining)
- length = remaining;
- assert (length > 0);
-
- pthread_mutex_lock (&fe->frame->lock);
- err = frame_entry_deallocate (cont, fe, start, length);
- if (err)
- goto out;
-
- start += length;
- remaining -= length;
- }
-
- out:
- pthread_mutex_unlock (&cont->lock);
-
- if (remaining > 0)
- debug ("no frame entry at %x (of container %x) but %x bytes "
- "left to deallocate!\n", start, cont, remaining);
-
- /* Return the amount actually deallocated. */
- l4_msg_append_word (ctx->msg, size - remaining);
-
- return err;
-}
-
-static error_t
-container_map (hurd_cap_rpc_context_t ctx)
-{
- error_t err = 0;
- struct container *cont = hurd_cap_obj_to_user (struct container *, ctx->obj);
- l4_word_t flags = l4_msg_word (ctx->msg, 1);
- uintptr_t vaddr = l4_msg_word (ctx->msg, 2);
- uintptr_t index = l4_msg_word (ctx->msg, 3);
- size_t size = l4_msg_word (ctx->msg, 4);
-
- /* We require four arguments (in addition to the cap id). */
- if (l4_untyped_words (l4_msg_msg_tag (ctx->msg)) != 5)
- {
- debug ("incorrect number of arguments passed. require 5 but got %d\n",
- l4_untyped_words (l4_msg_msg_tag (ctx->msg)));
- l4_msg_clear (ctx->msg);
- return EINVAL;
- }
-
- l4_msg_clear (ctx->msg);
-
-#if 0
- printf ("container_map (index:%x, size:%x, vaddr:%x, flags: %x)\n",
- index, size, vaddr, flags);
-#endif
-
- /* SIZE must be a multiple of the minimum page size and VADDR must
- be aligned on a base page boundary. */
- if ((size & (L4_MIN_PAGE_SIZE - 1)) != 0
- || (vaddr & (L4_MIN_PAGE_SIZE - 1)) != 0)
- return EINVAL;
-
- pthread_mutex_lock (&cont->lock);
-
- struct frame_entry *fe;
- for (fe = frame_entry_find (cont, index, 1);
- fe && size > 0;
- fe = hurd_btree_frame_entry_next (fe))
- {
- if (index < fe->region.start)
- /* Hole between last frame and this one. */
- {
- err = EINVAL;
- break;
- }
-
- uintptr_t offset = index - fe->region.start;
- if ((offset & (getpagesize () - 1)))
- /* Not properly aligned. */
- {
- err = EINVAL;
- break;
- }
-
- size_t len = fe->region.size - offset;
- if (len > size)
- len = size;
-
- size_t amount;
-
- pthread_mutex_lock (&fe->frame->lock);
- err = frame_entry_map (fe, offset, len, extract_access (flags), vaddr,
- ctx->msg, &amount);
- pthread_mutex_unlock (&fe->frame->lock);
-
- assert (! err || err == ENOSPC);
-
- index += amount;
- size -= amount;
- vaddr += amount;
-
- if (err == ENOSPC)
- {
- err = 0;
- break;
- }
- }
-
- pthread_mutex_unlock (&cont->lock);
-
- return err;
-}
-
-static error_t
-container_copy (hurd_cap_rpc_context_t ctx)
-{
- error_t err = 0;
- struct hurd_cap_ctx_cap_use *cap_use;
- struct container *src_cont = hurd_cap_obj_to_user (struct container *,
- ctx->obj);
-
- /* SRC_START will move as we copy data; SRC_START_ORIG stays
- constant so that we can figure out how much we have copied. */
- uintptr_t src_start = l4_msg_word (ctx->msg, 1);
- const uintptr_t src_start_orig = src_start;
-
- l4_word_t dest_cont_handle = l4_msg_word (ctx->msg, 2);
- hurd_cap_obj_t dest_cap;
- struct container *dest_cont;
-
- uintptr_t dest_start = l4_msg_word (ctx->msg, 3);
-
- size_t count = l4_msg_word (ctx->msg, 4);
- size_t flags = l4_msg_word (ctx->msg, 5);
-
- struct frame_entry *sfe_next;
- int nr_fpages;
- l4_fpage_t fpages[L4_FPAGE_SPAN_MAX];
- int i;
-
- /* We require five arguments (in addition to the cap id). */
- if (l4_untyped_words (l4_msg_msg_tag (ctx->msg)) != 6)
- {
- debug ("incorrect number of arguments passed. require 6 but got %d\n",
- l4_untyped_words (l4_msg_msg_tag (ctx->msg)));
- l4_msg_clear (ctx->msg);
- return EINVAL;
- }
-
- l4_msg_clear (ctx->msg);
-
- if (ctx->handle == dest_cont_handle)
- /* The source container is the same as the destination
- container. */
- {
- dest_cont = src_cont;
- pthread_mutex_lock (&src_cont->lock);
- }
- else
- /* Look up the destination container. */
- {
- cap_use = alloca (hurd_cap_ctx_size ());
- err = hurd_cap_ctx_start_cap_use (ctx,
- dest_cont_handle, &container_class,
- cap_use, &dest_cap);
- if (err)
- goto out;
-
- hurd_cap_obj_unlock (dest_cap);
-
- dest_cont = hurd_cap_obj_to_user (struct container *, dest_cap);
-
- /* There is a possible deadlock scenario here: one thread
- copies from SRC to DEST and another from DEST to SRC. We
- lock based on the lexical order of the container
- pointers. */
- if (src_cont < dest_cont)
- {
- pthread_mutex_lock (&src_cont->lock);
- pthread_mutex_lock (&dest_cont->lock);
- }
- else
- {
- pthread_mutex_lock (&dest_cont->lock);
- pthread_mutex_lock (&src_cont->lock);
- }
- }
-
- if ((flags & HURD_PM_CONT_ALL_OR_NONE))
- /* Don't accept a partial copy. */
- {
- /* XXX: Make sure that all of the source is defined and has
- enough permission. */
-
- /* Check that no frames are located in the destination
- region. */
- struct frame_entry *fe = frame_entry_find (dest_cont, dest_start,
- count);
- if (fe)
- {
- err = EEXIST;
- goto clean_up;
- }
- }
-
- /* Find the frame entry in the source container which contains the
- start of the region to copy. */
- sfe_next = frame_entry_find (src_cont, src_start, 1);
- if (! sfe_next)
- {
- err = ENOENT;
- goto clean_up;
- }
-
- /* Make sure that SRC_START is aligned on a frame boundary. */
- if (((sfe_next->region.start - src_start) & (L4_MIN_PAGE_SIZE - 1)) != 0)
- {
- err = EINVAL;
- goto clean_up;
- }
-
- while (sfe_next && count)
- {
- struct frame_entry *sfe, *dfe;
- uintptr_t src_end;
-
- sfe = sfe_next;
-
- /* Does the source frame entry cover all of the memory that we
- need to copy? */
- if (src_start + count > sfe->region.start + sfe->region.size)
- /* No. We will have to read the following frame as well. */
- {
- src_end = sfe->region.start + sfe->region.size - 1;
-
- /* Get the next frame entry. */
- sfe_next = hurd_btree_frame_entry_next (sfe);
- if (sfe_next && sfe_next->region.start != src_end + 1)
- /* There is a gap between SFE and the next frame
- entry. */
- sfe_next = NULL;
- }
- else
- /* The end of the region to copy is contained within SFE. */
- {
- src_end = src_start + count - 1;
-
- /* Once we process this frame entry, we will be done. */
- sfe_next = NULL;
- }
-
- pthread_mutex_lock (&sfe->frame->lock);
-
- /* Get the frames we'll have in the destination container. */
- nr_fpages
- = l4_fpage_span (src_start - sfe->region.start + sfe->frame_offset,
- src_end - sfe->region.start + sfe->frame_offset,
- fpages);
- assert (nr_fpages > 0);
-
- for (i = 0; i < nr_fpages; i ++)
- {
- dfe = frame_entry_alloc ();
- if (! dfe)
- {
- pthread_mutex_unlock (&sfe->frame->lock);
- err = ENOMEM;
- goto clean_up;
- }
-
- /* XXX: We need to check the user's quota. */
- err = frame_entry_copy (dest_cont, dfe,
- dest_start, l4_size (fpages[i]),
- sfe,
- sfe->frame_offset
- + src_start - sfe->region.start,
- flags & HURD_PM_CONT_COPY_SHARED);
- if (err)
- {
- pthread_mutex_unlock (&sfe->frame->lock);
- frame_entry_free (dfe);
- goto clean_up;
- }
-
- src_start += l4_size (fpages[i]);
- dest_start += l4_size (fpages[i]);
- count -= l4_size (fpages[i]);
- }
-
- if (! (flags & HURD_PM_CONT_COPY_SHARED)
- && (sfe->frame->may_be_mapped & HURD_PM_CONT_WRITE))
- /* We just created a COW copy of SFE->FRAME and we have given
- out at least one map with write access. Revoke any write
- access to the frame. */
- {
- l4_fpage_t fpage = sfe->frame->memory;
- l4_set_rights (&fpage, L4_FPAGE_WRITABLE);
- l4_unmap_fpage (fpage);
- sfe->frame->may_be_mapped &= L4_FPAGE_EXECUTABLE|L4_FPAGE_READABLE;
- }
-
- pthread_mutex_unlock (&sfe->frame->lock);
- }
-
- if (count > 0)
- {
- assert (! sfe_next);
- err = ESRCH;
- }
-
- clean_up:
- assert (count == 0 || err);
-
- if (dest_cont == src_cont)
- /* If the source and destination are the same then don't unlock
- the same lock twice. */
- pthread_mutex_unlock (&src_cont->lock);
- else
- {
- /* Unlike with locking, the unlock order doesn't matter. */
- pthread_mutex_unlock (&src_cont->lock);
- pthread_mutex_unlock (&dest_cont->lock);
-
- hurd_cap_obj_lock (dest_cap);
- hurd_cap_ctx_end_cap_use (ctx, cap_use);
- }
-
- out:
- /* Return the amount actually copied. */
- l4_msg_append_word (ctx->msg, src_start - src_start_orig);
- return err;
-}
-
-error_t
-container_demuxer (hurd_cap_rpc_context_t ctx)
-{
- error_t err = 0;
-
- switch (l4_msg_label (ctx->msg))
- {
- case hurd_pm_container_create_id:
- err = container_create (ctx);
- break;
-
- case hurd_pm_container_share_id:
- err = container_share (ctx);
- break;
-
- case hurd_pm_container_allocate_id:
- err = container_allocate (ctx);
- break;
-
- case hurd_pm_container_deallocate_id:
- err = container_deallocate (ctx);
- break;
-
- case 128: /* The old container map implementation. */
- case hurd_pm_container_map_id:
- err = container_map (ctx);
- break;
-
- case hurd_pm_container_copy_id:
- err = container_copy (ctx);
- break;
-
- default:
- err = EOPNOTSUPP;
- }
-
- /* If the stub returns EOPNOTSUPP then we clear the message buffer;
- otherwise we assume that the message buffer contains a valid
- reply message, in which case we set the error code returned by
- the stub and have the demuxer succeed. */
- if (EXPECT_FALSE (err == EOPNOTSUPP))
- l4_msg_clear (ctx->msg);
-
- l4_set_msg_label (ctx->msg, err);
-
- if (err)
- debug ("%s: Returning %d to %x\n", __FUNCTION__, err, ctx->from);
-
- return 0;
-}
-
-error_t
-container_alloc (l4_word_t nr_fpages, l4_word_t *fpages,
- struct container **r_cont)
-{
- error_t err;
- hurd_cap_obj_t obj;
- struct container *cont;
- l4_word_t start;
- int i;
-
- err = hurd_cap_class_alloc (&container_class, &obj);
- if (err)
- return err;
-
- cont = hurd_cap_obj_to_user (struct container *, obj);
-
-#ifndef NDEBUG
- /* We just allocated CONT and we haven't given it to anyone else,
- however, frame_entry_create requires that CONT be locked and if
- it isn't, will trigger an assert. Make it happy. */
- pthread_mutex_lock (&cont->lock);
-#endif
-
- hurd_btree_frame_entry_tree_init (&cont->frame_entries);
- start = l4_address (fpages[0]);
- for (i = 0; i < nr_fpages; i ++)
- {
- struct frame_entry *fe = frame_entry_alloc ();
- if (! fe)
- {
- err = ENOMEM;
- break;
- }
-
- err = frame_entry_create (cont, fe,
- l4_address (fpages[i]) - start,
- l4_size (fpages[i]));
- if (err)
- {
- frame_entry_free (fe);
- break;
- }
-
- fe->frame->memory = fpages[i];
- pthread_mutex_unlock (&fe->frame->lock);
- }
-
-#ifndef NDEBUG
- pthread_mutex_unlock (&cont->lock);
-#endif
-
- if (! err)
- *r_cont = cont;
- return err;
-}
-
-
-/* Initialize a new container object. */
-static error_t
-container_init (hurd_cap_class_t cap_class, hurd_cap_obj_t obj)
-{
- struct container *cont = hurd_cap_obj_to_user (struct container *, obj);
-
- pthread_mutex_init (&cont->lock, 0);
- hurd_btree_frame_entry_tree_init (&cont->frame_entries);
-
- return 0;
-}
-
-/* Reinitialize a container object. */
-static void
-container_reinit (hurd_cap_class_t cap_class, hurd_cap_obj_t obj)
-{
- struct container *cont = hurd_cap_obj_to_user (struct container *, obj);
- struct frame_entry *fe, *next;
-
- assert (pthread_mutex_trylock (&cont->lock));
- pthread_mutex_unlock (&cont->lock);
-
- for (fe = hurd_btree_frame_entry_first (&cont->frame_entries);
- fe; fe = next)
- {
- next = hurd_btree_frame_entry_next (fe);
- pthread_mutex_lock (&fe->frame->lock);
- frame_entry_destroy (cont, fe, true);
- frame_entry_free (fe);
- }
-
- hurd_btree_frame_entry_tree_init (&cont->frame_entries);
-}
-
-/* Initialize the container class subsystem. */
-error_t
-container_class_init ()
-{
- return hurd_cap_class_init (&container_class, struct container *,
- container_init, NULL, container_reinit, NULL,
- container_demuxer);
-}
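
container_copy above avoids the deadlock between concurrent
SRC-to-DEST and DEST-to-SRC copies by always taking the two container
locks in pointer order. Isolated as a sketch (the helper name is
hypothetical; only the lock member of struct container is assumed):

#include <pthread.h>

/* Hypothetical helper: lock two containers in a globally consistent
   order (by address), so that two threads locking the same pair in
   opposite roles cannot deadlock.  The lock is taken only once when
   source and destination are the same object.  */
static void
container_lock_pair (struct container *a, struct container *b)
{
  if (a == b)
    pthread_mutex_lock (&a->lock);
  else if (a < b)
    {
      pthread_mutex_lock (&a->lock);
      pthread_mutex_lock (&b->lock);
    }
  else
    {
      pthread_mutex_lock (&b->lock);
      pthread_mutex_lock (&a->lock);
    }
}
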
diff --git a/physmem/frame-entry.c b/physmem/frame-entry.c
deleted file mode 100644
index 47617ec..0000000
--- a/physmem/frame-entry.c
+++ /dev/null
@@ -1,1232 +0,0 @@
-/* frame-entry.c - Frame entry management.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Neal H. Walfield <neal@gnu.org>.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with the GNU Hurd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <assert.h>
-#include <string.h>
-#include <errno.h>
-#include <hurd/btree.h>
-#include <hurd/slab.h>
-
-#include <compiler.h>
-
-#include "priv.h"
-#include "physmem.h"
-#include "zalloc.h"
-
-static error_t
-frame_entry_constructor (void *hook, struct frame_entry *frame_entry)
-{
- frame_entry->shared_next = frame_entry;
- frame_entry->shared_prevp = &frame_entry->shared_next;
-
- return 0;
-}
-
-SLAB_CLASS(frame_entry, struct frame_entry)
-
-static struct hurd_frame_entry_slab_space frame_entry_space;
-
-void
-frame_entry_init (void)
-{
- hurd_frame_entry_slab_init (&frame_entry_space, NULL, NULL,
- frame_entry_constructor, NULL, NULL);
-}
-
-void
-frame_entry_dump (struct frame_entry *fe)
-{
- printf ("frame_entry: %x:%x+%x@%x, frame: %x:%x+%x, ->cow: %d, ->refs: %d, shared list: ",
- fe, fe->region.start, fe->region.size, fe->frame_offset,
- fe->frame, l4_address (fe->frame->memory),
- l4_size (fe->frame->memory),
- fe->frame->cow, fe->frame->refs);
-
- int shares = 0;
- struct frame_entry *f = fe;
- do
- {
- printf ("%x:%x+%x->", f, f->frame_offset, f->region.size);
- shares ++;
- assert (f->frame == fe->frame);
- f = f->shared_next;
- }
- while (f != fe);
- printf ("(=%d)\n", shares);
-
- int entries = 0;
- for (f = fe->frame->frame_entries; f; f = f->next)
- entries ++;
- assert (entries == fe->frame->refs);
-}
-
-struct frame_entry *
-frame_entry_alloc (void)
-{
- error_t err;
- struct frame_entry *frame_entry;
-
- err = hurd_frame_entry_slab_alloc (&frame_entry_space, &frame_entry);
- if (err)
- return 0;
-
- assert (frame_entry->shared_next == frame_entry);
- assert (frame_entry->shared_prevp == &frame_entry->shared_next);
-
- return frame_entry;
-}
-
-void
-frame_entry_free (struct frame_entry *frame_entry)
-{
- assert (frame_entry->shared_next == frame_entry);
-#ifndef NDEBUG
- memset (frame_entry, 0xfe, sizeof (struct frame_entry));
- frame_entry_constructor (0, frame_entry);
-#endif
- hurd_frame_entry_slab_dealloc (&frame_entry_space, frame_entry);
-}
-
-/* If SHARE is non-NULL, add FRAME_ENTRY (which is not attached to any
- share list) to SHARE's share list. Otherwise, remove FRAME_ENTRY
- from the share list to which it is currently attached. */
-static void
-frame_entry_share_with (struct frame_entry *frame_entry,
- struct frame_entry *share)
-{
- if (share)
- /* Add FRAME_ENTRY to SHARE's share list. */
- {
- /* FRAME_ENTRY shouldn't be on a shared list. */
- assert (frame_entry->shared_next == frame_entry);
-
- frame_entry->shared_next = share;
- frame_entry->shared_prevp = share->shared_prevp;
- *frame_entry->shared_prevp = frame_entry;
- share->shared_prevp = &frame_entry->shared_next;
- }
- else
- /* Remove FRAME_ENTRY from any share list. */
- {
- *frame_entry->shared_prevp = frame_entry->shared_next;
- frame_entry->shared_next->shared_prevp = frame_entry->shared_prevp;
-
- frame_entry->shared_next = frame_entry;
- frame_entry->shared_prevp = &frame_entry->shared_next;
- }
-}
-
-error_t
-frame_entry_create (struct container *cont,
- struct frame_entry *frame_entry,
- uintptr_t cont_addr, size_t size)
-{
- error_t err;
-
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
- /* Size must be a power of 2. */
- assert (size > 0 && (size & (size - 1)) == 0);
-
- frame_entry->container = cont;
- frame_entry->region.start = cont_addr;
- frame_entry->region.size = size;
- frame_entry->frame_offset = 0;
-
- frame_entry->frame = frame_alloc (size);
- if (! frame_entry->frame)
- return errno;
-
- err = container_attach (cont, frame_entry);
- if (EXPECT_FALSE (err))
- {
- debug ("Overlap: %x+%x\n", cont_addr, size);
- frame_deref (frame_entry->frame);
- return EEXIST;
- }
-
- frame_add_user (frame_entry->frame, frame_entry);
-
- frame_entry_share_with (frame_entry, NULL);
-
- return 0;
-}
-
-error_t
-frame_entry_copy (struct container *cont,
- struct frame_entry *frame_entry,
- uintptr_t cont_addr, size_t size,
- struct frame_entry *source,
- size_t frame_offset,
- bool shared_memory)
-{
- error_t err;
-
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
- assert (pthread_mutex_trylock (&source->frame->lock) == EBUSY);
- /* Size must be a power of 2. */
- assert (size > 0 && (size & (size - 1)) == 0);
- assert (source->frame);
- /* Make sure that the provided offset is valid. */
- assert (frame_offset >= 0
- && frame_offset <= l4_size (source->frame->memory) - size);
- /* The frame entry must refer to memory starting at a size aligned
- boundary. */
- assert ((frame_offset & (size - 1)) == 0);
-
- frame_entry->container = cont;
- frame_entry->region.start = cont_addr;
- frame_entry->region.size = size;
- frame_entry->frame = source->frame;
- frame_entry->frame_offset = frame_offset;
-
- err = container_attach (cont, frame_entry);
- if (EXPECT_FALSE (err))
- {
- debug ("Overlap: %x+%x\n", cont_addr, size);
- return EEXIST;
- }
-
- frame_ref (source->frame);
- frame_add_user (source->frame, frame_entry);
-
- if (shared_memory)
- /* This is a copy of the entry but the physical memory is
- shared. */
- frame_entry_share_with (frame_entry, source);
- else
- /* Copy on write. */
- {
- source->frame->cow ++;
- frame_entry_share_with (frame_entry, NULL);
- }
-
- return 0;
-}
-
-error_t
-frame_entry_use (struct container *cont,
- struct frame_entry *frame_entry,
- uintptr_t cont_addr, size_t size,
- struct frame *source,
- size_t frame_offset)
-{
- error_t err;
-
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
- assert (pthread_mutex_trylock (&source->lock) == EBUSY);
- /* SIZE must be a power of 2. */
- assert (size > 0 && (size & (size - 1)) == 0);
- /* FRAME_OFFSET must be a multiple of SIZE. */
- assert ((frame_offset & (size - 1)) == 0);
- /* The frame entry must actually cover the FRAME. */
- assert (frame_offset + size <= l4_size (source->memory));
-
- frame_entry->container = cont;
- frame_entry->region.start = cont_addr;
- frame_entry->region.size = size;
- frame_entry->frame_offset = frame_offset;
- frame_entry->frame = source;
-
- err = container_attach (cont, frame_entry);
- if (EXPECT_FALSE (err))
- {
- debug ("Overlap: %x+%x\n", cont_addr, size);
- return EEXIST;
- }
-
- frame_ref (frame_entry->frame);
- frame_add_user (frame_entry->frame, frame_entry);
-
- frame_entry_share_with (frame_entry, 0);
-
- return 0;
-}
-
-void
-frame_entry_destroy (struct container *cont, struct frame_entry *frame_entry,
- bool do_unlock)
-{
- if (cont)
- {
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
- container_detach (cont, frame_entry);
- }
-
- assert (frame_entry->frame);
- assert (pthread_mutex_trylock (&frame_entry->frame->lock) == EBUSY);
-
- frame_drop_user (frame_entry->frame, frame_entry);
-
- if (frame_entry->shared_next != frame_entry)
- /* FRAME_ENTRY is on a share list, remove it. */
- frame_entry_share_with (frame_entry, NULL);
- else
- /* FRAME_ENTRY is not on a share list and therefore holds a COW
- copy if there are other users of the underlying frame. */
- {
- if (frame_entry->frame->frame_entries)
- {
- assert (frame_entry->frame->cow > 0);
- frame_entry->frame->cow --;
- }
- else
- assert (frame_entry->frame->cow == 0);
- }
-
- if (do_unlock)
- frame_deref (frame_entry->frame);
- else
- frame_release (frame_entry->frame);
-}
-
-struct frame_entry *
-frame_entry_find (struct container *cont, uintptr_t cont_addr, size_t size)
-{
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
-
- struct region region = { cont_addr, size };
- return hurd_btree_frame_entry_find (&cont->frame_entries, &region);
-}
-
-error_t
-frame_entry_map (struct frame_entry *fe,
- size_t start, size_t len, int access,
- uintptr_t vaddr, l4_msg_t msg,
- size_t *amount)
-{
- error_t err;
-
- assert (pthread_mutex_trylock (&fe->frame->lock) == EBUSY);
- assert (start < fe->region.size);
- assert (len <= fe->region.size);
- assert (start + len <= fe->region.size);
- assert ((access & ~HURD_PM_CONT_RWX) == 0);
-
- if (EXPECT_FALSE ((access & HURD_PM_CONT_WRITE) && fe->frame->cow))
- /* The caller requests a mapping with write access and the
- frame is marked COW; now's the time to do the copy. */
- {
- /* If the frame has COW copies, there must be no extant
- writable mappings. */
- assert (! (fe->frame->may_be_mapped & HURD_PM_CONT_WRITE));
- /* If the frame has COW copies there have to be at least two
- users. */
- assert (fe->frame->refs > 1);
-
- /* If this is a shared memory copy, we need to allocate a
- frame to cover the largest frame entry. */
- struct frame_entry *base = fe;
-
- if (! (base->frame_offset == 0
- && base->region.size == l4_size (fe->frame->memory)))
- for (struct frame_entry *s = fe->shared_next;
- s != fe; s = s->shared_next)
- {
- assert (s->frame == fe->frame);
-
- /* Does S contain BASE? */
- if (s->frame_offset <= base->frame_offset
- && (base->frame_offset + base->region.size
- <= s->frame_offset + s->region.size))
- {
- base = s;
-
- if (base->frame_offset == 0
- && base->region.size == l4_size (fe->frame->memory))
- break;
- }
- }
-
- struct frame *frame = frame_alloc (base->region.size);
-
- /* The point of this function is to get a mapping of the
- memory. Even if ORIGINAL_FRAME doesn't have memory
- allocated yet (and hence no copy needs to be done), FRAME
- will need the memory shortly. */
- frame_memory_bind (frame);
-
- /* We only have to do a memcpy if the source has memory
- allocated. */
- if (l4_address (fe->frame->memory))
- memcpy ((void *) l4_address (frame->memory),
- (void *) (l4_address (fe->frame->memory)
- + base->frame_offset),
- base->region.size);
-
- /* Change everyone using this copy of FE->FRAME to use FRAME
- (i.e. all frame entries on FE's shared frame list). */
-
- struct frame *original_frame = fe->frame;
-
- /* Iterate over the shared list moving all but BASE. */
- struct frame_entry *falsely_shared = 0;
- struct frame_entry *next = base->shared_next;
- while (next != base)
- {
- /* S will be removed from the shared list. Get the next
- element now. */
- struct frame_entry *s = next;
- next = s->shared_next;
-
- if (s->frame_offset < base->frame_offset
- || s->frame_offset >= base->frame_offset + base->region.size)
- /* S is falsely sharing with FE. This can happen
- when, for instance, a client A shares the middle
- 8kb of a 16kb frame with a second client, B.
- (Hence B references 2 4kb frame entries.) If A
- deallocates the 16kb region, B's two frame entries
- are still marked as shared; however, they do not
- actually share any physical memory. If there is an
- extant COW on the physical memory then we only want
- to copy the memory that is actually shared (there
- is no need to allocate more physical memory than
- necessary). */
- {
- assert ((s->frame_offset + base->region.size
- <= base->frame_offset)
- || (s->frame_offset
- >= base->frame_offset + base->region.size));
-
- /* Remove S from the shared list. */
- frame_entry_share_with (s, NULL);
-
- if (! falsely_shared)
- /* First one. Reset. */
- falsely_shared = s;
- else
- /* Add to the falsely shared list with its
- possible real sharers. */
- frame_entry_share_with (s, falsely_shared);
- }
- else
- /* Migrate S from ORIGINAL_FRAME to the copy, FRAME.
- (If S is BASE we migrate later.) */
- {
- frame_drop_user (original_frame, s);
- frame_release (original_frame);
-
- frame_ref (frame);
- frame_add_user (frame, s);
- s->frame = frame;
- s->frame_offset -= base->frame_offset;
-
- assert (s->frame_offset >= 0);
- assert (s->frame_offset + s->region.size
- <= l4_size (s->frame->memory));
- }
- }
-
- /* Of those on the shared list, only BASE still references
- the original frame. Removing BASE may cause some of
- ORIGINAL_FRAME to now be unreferenced. Hence, we cannot
- simply move it as we did with the others. */
- uintptr_t bstart = base->region.start;
- size_t bsize = base->region.size;
- bool fe_is_base = (base == fe);
-
- frame_entry_share_with (base, NULL);
-
- /* Reallocate the frame entry. */
- err = frame_entry_deallocate (base->container, base, bstart, bsize);
- assert_perror (err);
- base = frame_entry_alloc ();
- err = frame_entry_use (base->container, base, bstart, bsize, frame, 0);
- assert_perror (err);
-
- /* All the frame entries using FRAME are on the shared
- list. */
- frame_entry_share_with (base, frame->frame_entries);
-
- if (fe_is_base)
- fe = base;
-
- /* We managed to pick up an extra reference to FRAME in the
- loop (we already had one before we entered the loop and
- we added a reference for each entry which shares the
- frame including FE). Drop it now. */
- frame_release (frame);
- }
- else
- /* Allocate the memory (if needed). */
- frame_memory_bind (fe->frame);
-
- fe->frame->may_be_mapped |= access;
-
- /* Get the start of the mem. */
- l4_word_t mem = l4_address (fe->frame->memory) + fe->frame_offset + start;
-
- l4_fpage_t fpages[(L4_NUM_MRS - (l4_untyped_words (l4_msg_msg_tag (msg))
- + l4_untyped_words (l4_msg_msg_tag (msg))))
- / 2];
- int nr_fpages = l4_fpage_xspan (mem, mem + len - 1, vaddr,
- fpages, sizeof (fpages) / sizeof (*fpages));
-
- for (int i = 0; i < nr_fpages; i ++)
- {
- /* Set the desired permissions. */
- l4_set_rights (&fpages[i], access);
-
- /* Add the map item to the message. */
- l4_msg_append_map_item (msg, l4_map_item (fpages[i], vaddr));
-
- vaddr += l4_size (fpages[i]);
- }
-
- if (amount)
- *amount = l4_address (fpages[nr_fpages - 1])
- + l4_size (fpages[nr_fpages - 1]) - mem;
-
- return
- l4_address (fpages[nr_fpages]) + l4_size (fpages[nr_fpages]) - mem < len
- ? ENOSPC : 0;
-}
-
-error_t
-frame_entry_deallocate (struct container *cont,
- struct frame_entry *frame_entry,
- const uintptr_t cont_addr,
- const size_t len)
-{
- const uintptr_t cont_start
- = cont_addr - frame_entry->region.start + frame_entry->frame_offset;
- struct frame *frame;
-
- /* FE currently uses FE->FRAME. Make the TODO bytes of memory FE
- references starting at byte SKIP (relative to the base of FE) use
- the array of FRAMES.
-
- Returns the last frame used (i.e. the one which contains byte
- SKIP+TODO). */
- struct frame **move (struct frame_entry *fe, size_t skip, size_t todo,
- l4_fpage_t *fpages, struct frame **frames)
- {
- error_t err;
-
- assert (todo > 0);
- /* The first byte of FE may not be the first byte of
- *FRAMES. For instance, FRAMES may be 8kb long
- but FE references only the second 4kb. */
- assert (skip < fe->region.size);
- assert (fe->frame_offset + skip >= l4_address (*fpages));
- uintptr_t frame_offset = fe->frame_offset + skip - l4_address (*fpages);
-
- /* The container address of the first byte. */
- uintptr_t addr = fe->region.start + skip;
-
- for (; todo > 0; frames ++, fpages ++)
- {
- size_t count = l4_size (*fpages) - frame_offset;
- if (count > todo)
- count = todo;
-
- l4_fpage_t subfpages[L4_FPAGE_SPAN_MAX];
- int n = l4_fpage_span (frame_offset,
- frame_offset + count - 1,
- subfpages);
-
- for (int i = 0; i < n; i ++)
- {
- struct frame_entry *n = frame_entry_alloc ();
-
- err = frame_entry_use (cont, n, addr,
- l4_size (subfpages[i]),
- *frames, l4_address (subfpages[i]));
- assert_perror (err);
-
- /* Although they only falsely share FE->FRAME (which is
- perfectly correct), the new frame entries are on a
- share list to reduce the number of gratuitous COWs:
- there is one COW for each shared copy; if there is
- only a single shared copy then no COWs need to be
- performed. */
- frame_entry_share_with (n, fe);
-
- frame_offset = 0;
- addr += l4_size (subfpages[i]);
- todo -= l4_size (subfpages[i]);
- }
- }
-
- frames --;
-
- return frames;
- }
-
- /* Migrate the frame entries using FRAME between byte START and END
- to new frame structures which use the same physical memory. */
- void migrate (uintptr_t start, uintptr_t end)
- {
- assert (start < end);
- /* START must come before the end of the deallocation zone. */
- assert (start <= cont_start + len);
- /* END must come after the start of the deallocation zone. */
- assert (end + 1 >= cont_start);
-
- /* FRAME_ENTRY must cover all of the underlying frame. */
- assert (frame_entry->frame_offset == 0);
- assert (frame_entry->region.size == l4_size (frame->memory));
-
- /* Allocate new frames and point them to their respective pieces
- of FRAME. */
- l4_fpage_t fpages[L4_FPAGE_SPAN_MAX];
- int nr_fpages = l4_fpage_span (start, end, fpages);
- struct frame **frames = alloca (sizeof (struct frame *) * nr_fpages);
-
- for (int i = 0; i < nr_fpages; i ++)
- {
- frames[i] = frame_alloc (l4_size (fpages[i]));
- frames[i]->memory = l4_fpage (l4_address (frame->memory)
- + l4_address (fpages[i]),
- l4_size (fpages[i]));
- frames[i]->may_be_mapped = frame->may_be_mapped;
- }
-
- /* Move the parts of FRAME_ENTRY which are not going to be
- deallocated to the new frames. */
-
- int i = 0;
-
- /* If START is before CONT_START then we need to relocate some
- of FRAME_ENTRY.
-
- START END
- v v
- [ | | | | | ] <- FRAME_ENTRY
- ^ ^
- | CONT_START+LEN
- CONT_START
- \/
- keep
-
- (END can be before CONT_START.)
- */
- if (start < cont_start)
- {
- size_t todo;
- if (end < cont_start)
- todo = end - start + 1;
- else
- todo = cont_start - start;
-
- /* Find the frame which contains byte START. */
- for (; i < nr_fpages; i ++)
- if (start < l4_address (fpages[i]) + l4_size (fpages[i]))
- break;
-
- struct frame **l
- = move (frame_entry, start, todo, &fpages[i], &frames[i]);
-
- int last = i + ((void *) l - (void *) &frames[i]) / sizeof (*l);
- assert (last < nr_fpages);
- for (int j = i; j <= last; j ++)
- frames[j]->cow ++;
- i = last;
- }
-
- /* If CONT_START+LEN is before END then we need to relocate some
- of FRAME_ENTRY.
-
- START END
- v v
- [ | | | | | ] <- FRAME_ENTRY
- ^ ^
- | CONT_START+LEN
- CONT_START
- \ /
- keep
-
- (START can be after CONT_START+LEN.)
- */
- if (cont_start + len < end)
- {
- size_t skip;
-
- if (start < cont_start + len)
- skip = cont_start + len;
- else
- skip = start;
-
- /* Find the frame which contains the first byte referenced
- by FRAME_ENTRY after the region to deallocate. */
- for (; i < nr_fpages; i ++)
- if (skip >= l4_address (fpages[i]))
- break;
-
- struct frame **l
- = move (frame_entry, skip, end - skip + 1, &fpages[i], &frames[i]);
-
- int last = i + ((void *) l - (void *) &frames[i]) / sizeof (*l);
- assert (last < nr_fpages);
- for (int j = i; j <= last; j ++)
- frames[j]->cow ++;
- }
-
- /* Change the rest of the frame entries referencing FRAME
- between START and END to reference the respective frames in
- FRAMES. */
-
- struct frame_entry *n = frame->frame_entries;
- while (n)
- {
- struct frame_entry *fe = n;
- n = fe->next;
-
- /* Any frame entries connected to FRAME_ENTRY should
- reference the same frame. */
- assert (frame == fe->frame);
-
-      /* Any frame entries referencing memory before START should
- have been relocated from FRAME_ENTRY in a prior pass
- (except for FRAME_ENTRY, of course). */
- assert (fe == frame_entry || fe->frame_offset >= start);
-
- if (fe == frame_entry)
- continue;
- else if (fe->frame_offset < end)
- {
- /* END is either the end of the frame or the memory
- following END is completely unreferenced (and to be
- deallocated). Hence any frame entry which starts
- before END ends before it as well. */
- assert (fe->frame_offset + fe->region.size - 1 <= end);
-
- void adjust (struct frame_entry *fe, int i)
- {
- assert (fe->frame == frame);
- /* FE fits entirely in FRAMES[I]. */
- assert (l4_address (fpages[i]) <= fe->frame_offset
- && (fe->frame_offset + fe->region.size
- <= (l4_address (fpages[i])
- + l4_size (fpages[i]))));
-
- /* Adjust the frame offset. */
- fe->frame_offset -= l4_address (fpages[i]);
-
- /* Make sure N always points to an unprocessed frame
- entry on FRAME->FRAME_ENTRIES. */
- if (fe == n)
- n = fe->next;
-
- /* Move from the old frame to the new one. */
- frame_drop_user (frame, fe);
- frame_release (frame);
-
- fe->frame = frames[i];
-
- frame_ref (fe->frame);
- frame_add_user (fe->frame, fe);
- }
-
- /* Find the frame which holds the start of the
-		 memory FE references.  */
- int i;
- for (i = 0; i < nr_fpages; i ++)
- if (fe->frame_offset >= l4_address (fpages[i])
- && (fe->frame_offset
- < l4_address (fpages[i]) + l4_size (fpages[i])))
- break;
-
- adjust (fe, i);
-
- if (fe->shared_next == fe)
- /* FE was not on a shared list. Remove its COW from
- FRAME. Add a COW to the new frame. */
- {
- assert (frame->cow > 0);
- frame->cow --;
- fe->frame->cow ++;
- }
- else
- /* FE was on a shared list. Fix it up. */
- {
- bool shares_old_cow = false;
- bool shares_new_cow_with_frame_entry = false;
-
-	      /* We need to use FE as an anchor, hence we collect the
-		 relocated entries on SHARED_LIST and then, at the end,
-		 detach FE and attach it to the resulting list.  */
- struct frame_entry *shared_list = 0;
-
- struct frame_entry *sn = fe->shared_next;
- while (sn != fe)
- {
- struct frame_entry *s = sn;
- sn = s->shared_next;
-
- if (s == frame_entry)
- shares_old_cow = true;
- else if (s->frame != frame)
- /* S was already relocated which means that it
- was split off from FRAME_ENTRY which means
- that the cow was already counted. */
- {
- shares_old_cow = true;
-
- if (s->frame == fe->frame)
- {
- shares_new_cow_with_frame_entry = true;
-
- frame_entry_share_with (s, NULL);
- if (! shared_list)
- shared_list = s;
- else
- frame_entry_share_with (s, shared_list);
- }
- }
- else if (l4_address (fpages[i]) <= s->frame_offset
- && (s->frame_offset < l4_address (fpages[i])
- + l4_size (fpages[i])))
- /* S and FE continue to share a copy of the
- underlying frame (i.e. no false
- sharing). */
- {
- adjust (s, i);
-
- frame_entry_share_with (s, NULL);
- if (! shared_list)
- shared_list = s;
- else
- frame_entry_share_with (s, shared_list);
- }
- else
- shares_old_cow = true;
- }
-
- frame_entry_share_with (fe, 0);
- if (shared_list)
- frame_entry_share_with (fe, shared_list);
-
- if (! shares_old_cow)
- /* There was no false sharing, i.e. there are no
- frame entries still using this copy of the old
- frame. */
- {
- assert (frame->cow > 0);
- frame->cow --;
- }
-
- if (! shares_new_cow_with_frame_entry)
- /* Unless we share our copy of the underlying
- frame with FRAME_ENTRY, we need to add a
- COW. */
- fe->frame->cow ++;
- }
- }
- else
- assert (fe->frame_offset > end);
- }
-
- /* Any new frame entries created from FRAME_ENTRY are put on its
- shared list. If they were not picked up above (because
- FRAME_ENTRY is on a share list) then some of them may not
- have been properly fixed up. */
- if (frame_entry->shared_next != frame_entry)
- {
- struct frame_entry *n = frame_entry->shared_next;
- while (n != frame_entry)
- {
- struct frame_entry *fe = n;
- n = fe->shared_next;
-
- if (fe->frame != frame)
- /* This is a new frame entry. */
- {
- assert (l4_address (frame->memory)
- <= l4_address (fe->frame->memory));
- assert (l4_address (fe->frame->memory)
- <= (l4_address (frame->memory)
- + l4_size (frame->memory)));
-
- struct frame_entry *shared_list = 0;
- struct frame_entry *m = fe->shared_next;
- while (m != fe)
- {
- struct frame_entry *b = m;
- m = m->shared_next;
-
- if (fe->frame == b->frame)
- {
- if (b == n)
- n = n->shared_next;
-
- frame_entry_share_with (b, NULL);
- if (! shared_list)
- shared_list = b;
- else
- frame_entry_share_with (b, shared_list);
- }
- }
-
- frame_entry_share_with (fe, 0);
- if (shared_list)
- frame_entry_share_with (fe, shared_list);
- }
- }
- }
-
- /* Tidy up the new frames. */
- for (i = 0; i < nr_fpages; i ++)
- {
- /* Each frame should have picked up at least one frame
- entry. */
- assert (frames[i]->frame_entries);
-
- /* Each user of FRAMES[i] added a cow. That is one too
- many. Remove it now. */
- assert (frames[i]->cow > 0);
- frames[i]->cow --;
- if (frames[i]->cow > 0
- && (frames[i]->may_be_mapped & L4_FPAGE_WRITABLE))
- {
- l4_unmap_fpage (l4_fpage_add_rights (frames[i]->memory,
- L4_FPAGE_WRITABLE));
- frames[i]->may_be_mapped
- &= L4_FPAGE_EXECUTABLE|L4_FPAGE_READABLE;
- }
-
- /* A new frame starts life with a single reference (even
- though no frame entries use it). We drop that extra one
- now. */
- assert (frames[i]->refs > 1);
- frame_deref (frames[i]);
- }
- }
-
- assert (pthread_mutex_trylock (&cont->lock) == EBUSY);
- /* Assert that the region to deallocate falls completely within
- FRAME_ENTRY. */
- assert (cont_addr >= frame_entry->region.start
- && (cont_addr + len
- <= frame_entry->region.start + frame_entry->region.size));
- /* Assert that CONT_ADDR refers to memory which starts on a
- multiple of the base page size. */
- assert ((cont_start & (L4_MIN_PAGE_SIZE - 1)) == 0);
- /* And that LEN is a multiple of the base page size. */
- assert ((len & (L4_MIN_PAGE_SIZE - 1)) == 0);
-
- frame = frame_entry->frame;
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
- assert (frame->frame_entries);
-
-#if 0
- printf ("%s (cont:%x, fe: %x, dzone:%x+%x); ",
- __FUNCTION__, cont, frame_entry, cont_start, len);
- frame_entry_dump (frame_entry);
-#endif
-
- /* Before we do anything else, we need to make sure that any
- mappings via FRAME_ENTRY are removed: most importantly, if we
- zfree any memory and then reallocate (either internally or by
- another process) before it is unmapped, any extant mappers may
- have the opportunity to see (or modify) it; but also, any
- mappings made via FRAME_ENTRY of the region to deallocate must
- (eventually) be invalidated. Unfortunately, this means
- invalidating all mappings of FRAME. */
- if (frame->may_be_mapped)
- {
- l4_fpage_t fpage = l4_fpage (l4_address (frame->memory)
- + frame_entry->frame_offset,
- frame_entry->region.size);
- l4_unmap_fpage (l4_fpage_add_rights (fpage, frame->may_be_mapped));
-
- /* If we unmapped the whole frame then we can clear
- FRAME->MAY_BE_MAPPED. */
- if (frame_entry->frame_offset == 0
- && frame_entry->region.size == l4_size (frame->memory))
- frame->may_be_mapped = 0;
- }
-
- /* Detach FRAME_ENTRY from its container: frame entries in the same
- container cannot overlap and we are going to replace the parts of
- FRAME_ENTRY with a set of smaller frame entries covering the
- physical memory which will not be deallocated. */
- container_detach (cont, frame_entry);
-
-
- if (! frame->frame_entries->next)
- /* FRAME_ENTRY is the only frame entry using FRAME. */
- {
- /* Make sure it is using the entire frame. */
- assert (frame_entry->frame_offset == 0);
- assert (frame_entry->region.size == l4_size (frame->memory));
-
- if (cont_start > 0)
- migrate (0, cont_start - 1);
- if (cont_start + len < l4_size (frame->memory))
- migrate (cont_start + len, l4_size (frame->memory) - 1);
-
- assert (frame->refs == 1);
-
-      /* If some of the frame entry was migrated, we must manually free
-	 the physical memory backing the deallocated region; the rest
-	 now belongs to the new frames.  */
- if (cont_start > 0 || cont_start + len < l4_size (frame->memory))
- {
- l4_fpage_t fpages[L4_FPAGE_SPAN_MAX];
- int nr_fpages = l4_fpage_span (cont_start,
- cont_start + len - 1, fpages);
-
- for (int i = 0; i < nr_fpages; i ++)
- {
-#ifndef NDEBUG
- memset ((void *) l4_address (frame->memory)
- + l4_address (fpages[i]),
- 0xde, l4_size (fpages[i]));
-#endif
- zfree (l4_address (frame->memory) + l4_address (fpages[i]),
- l4_size (fpages[i]));
- }
-
- frame->may_be_mapped = 0;
- frame->memory = l4_fpage (0, l4_size (frame->memory));
- }
-
- frame_entry_destroy (NULL, frame_entry, true);
- frame_entry_free (frame_entry);
- return 0;
- }
-
- if (frame_entry->frame_offset > 0
- || frame_entry->region.size < l4_size (frame->memory))
- /* FRAME_ENTRY does not cover all of the underlying frame. By
- definition, some other frame entry must. As such, all we have
- to do is fix up the parts of FRAME_ENTRY which will not be
- deallocated and then drop it. */
- {
-#ifndef NDEBUG
-      /* Assert that some frame entry covers all of FRAME.  */
- struct frame_entry *fe;
- for (fe = frame->frame_entries; fe; fe = fe->next)
- if (fe->frame_offset == 0
- && fe->region.size == l4_size (frame->memory))
- break;
- assert (fe);
-#endif
-
-      l4_fpage_t fpage = l4_fpage (0, l4_size (frame->memory));
- struct frame **f;
-
- if (frame_entry->frame_offset < cont_start)
- {
- f = move (frame_entry, 0, cont_start - frame_entry->frame_offset,
- &fpage, &frame_entry->frame);
- assert (f == &frame_entry->frame);
- }
-
- if (cont_start + len
- < frame_entry->frame_offset + frame_entry->region.size)
- {
- f = move (frame_entry,
- cont_start + len - frame_entry->frame_offset,
- frame_entry->frame_offset + frame_entry->region.size
- - (cont_start + len),
- &fpage, &frame_entry->frame);
- assert (f == &frame_entry->frame);
- }
-
- frame_entry_destroy (NULL, frame_entry, true);
- frame_entry_free (frame_entry);
- return 0;
- }
-
-
- /* Multiple frame entries reference FRAME_ENTRY->FRAME. Since frame
- entries may reference only part of the underlying frame, by
- releasing FRAME_ENTRY, 1) no single frame entry may now reference
- all of the underlying frame and 2) some of FRAME_ENTRY->FRAME may
- no longer be referenced and thus can be freed. For example, a
- client, A, may allocate a 16kb frame. A may give a second
- client, B, a copy of the middle 8kb. (Since the start address of
-     the 8kb area is not a multiple of the area's size, we create two
- frame entries: one for each 4kb region.) If A then deallocates
- the 16kb region, we would like to release the unreferenced
- physical memory. By inspection, we see that the first 4kb and
- the last 4kb are no longer used and could be freed:
-
- A
- / \
- 0kb| | | | |16kb
- \ / \ /
- B.1 B.2
-
- This problem becomes slightly more complicated when only part of
-     the frame entry is freed, e.g. if the client only deallocates the
- first 8kb of A. Further, we must maintain the predicate that all
- frames have at least one frame entry which references them in
- their entirety.
-
- We take the following approach:
-
- Set A=(DEALLOC_START, LEN) to the region to deallocate (relative
- to the start of the underlying frame). To identify unreferenced
- regions, iterate over the frame entries referencing the frame
- (excluding the one to deallocate). If the intersection of the
- frame entry and A is non-empty (i.e. A contains any part of the
- frame entry), save the result in T. If R is not set, set it to
- T. Otherwise, if T occurs before R, set R to T.
-
- If R completely covers the region to deallocate, A, we are done.
- Otherwise, any memory between the start of A and the start of R
- is unreferenced and we can free it (doing any required frame
-     splitting).  Move the start of A to the end of R.  If the area is
-     non-empty, repeat from the beginning.  Otherwise, we are done.  */
-
-
- /* Start of the region to deallocate relative to the start of the
-     frame which we still need to confirm as referenced or not.  */
- uintptr_t dealloc_start = cont_start;
- size_t dealloc_len = len;
-
- /* Area within DEALLOC_START+DEALLOC_LEN which we've confirmed
- another frame entry references.
-
- Initially, we (consistent with the above predicate) set the start
- address to the end of the region to deallocate with a length of
- 0. Because we prefer earlier and larger regions, if this isn't
- changed after iterating over all of the frame entries, we know it
- is safe to free the region. */
- uintptr_t refed_start = dealloc_start + dealloc_len;
- size_t refed_len = 0;
-
- /* Once we've identified a region which we can free (i.e. a region
- which no frame entry references), we will need to split memory
- which is still referenced into smaller frames. This is
- complicated by the fact that there may be multiple holes.
- Consider:
-
- FE
- / \
- [ | | | ]
- 0 4 8 C F
- \ / \ /
- A B
-
-     If all of frame entry FE is freed, frame entries A and B still
-     reference two 4k segments of the frame.  We can free the 4k
- regions starting at 0k and 8k.
-
- After each iteration, during which we've identified a region
- which is referenced, we free the memory between PROCESSED and
-     REFED_START and relocate the frame entries between REFED_START
-     and REFED_START+REFED_LEN.  We then set PROCESSED to
- REFED_START+REFED_LEN. */
- uintptr_t processed = 0;
-
- for (;;)
- {
- /* Iterate over the frame entries checking to see if they
- reference DEALLOC_START+DEALLOC_LEN and modifying
- REFED_START+REFED_LEN appropriately. */
- for (struct frame_entry *fe = frame->frame_entries; fe; fe = fe->next)
- {
- assert (fe->frame == frame);
-
- /* Don't consider the frame entry we are deallocating. */
- if (fe == frame_entry)
- continue;
-
- if (fe->frame_offset + fe->region.size <= dealloc_start)
- /* FE ends before the region to deallocate begins. */
- continue;
-
- if (fe->frame_offset >= dealloc_start + dealloc_len)
- /* FE starts after the region to deallocate ends. */
- continue;
-
- if (fe->frame_offset < refed_start)
- /* FE covers at least part of the region to deallocate and
- starts before what we've found so far. */
- {
- refed_start = fe->frame_offset;
- refed_len = fe->region.size;
- }
- else if (fe->frame_offset == refed_start
- && fe->region.size > refed_len)
- /* FE starts at REFED_START and is larger than
- REFED_LEN. */
- refed_len = fe->region.size;
- }
-
- if (processed < refed_start && processed < dealloc_start)
- /* PROCESSED comes before both REFED_START and DEALLOC_START.
- If there is memory to be freed, that memory is between
- DEALLOC_START and REFED_START. On the other hand,
- REFED_START may come before DEALLOC_START if a frame
- straddles DEALLOC_START. There is no need to gratuitously
- split it apart. */
- migrate (processed,
- refed_start < dealloc_start
- ? refed_start - 1 : dealloc_start - 1);
-
- /* The area between DEALLOC_START and REFED_START is not
- referenced. Free it and adjust the frame entries. */
-
- if (dealloc_start < refed_start && l4_address (frame->memory))
- {
- l4_fpage_t fpages[L4_FPAGE_SPAN_MAX];
- int nr_fpages = l4_fpage_span (dealloc_start,
- refed_start - 1, fpages);
-
- for (int i = 0; i < nr_fpages; i ++)
- {
-#ifndef NDEBUG
- memset ((void *) l4_address (frame->memory)
- + l4_address (fpages[i]),
- 0xde, l4_size (fpages[i]));
-#endif
- zfree (l4_address (frame->memory) + l4_address (fpages[i]),
- l4_size (fpages[i]));
- }
- }
-
- if (refed_len > 0)
- migrate (refed_start, refed_start + refed_len - 1);
- processed = refed_start + refed_len;
-
- if (refed_start + refed_len >= dealloc_start + dealloc_len)
- break;
-
- dealloc_len -= refed_start + refed_len - dealloc_start;
- dealloc_start = refed_start + refed_len;
-
- refed_start = dealloc_start + dealloc_len;
- refed_len = 0;
- }
-
- /* Move any remaining frame entries over. */
- if (processed < l4_size (frame->memory))
- migrate (processed, l4_size (frame->memory) - 1);
-
- /* And destroy the now redundant FRAME_ENTRY. But don't let it
- deallocate the physical memory! */
- frame->memory = l4_fpage (0, l4_size (frame->memory));
-
- assert (frame->refs == 1);
- assert (frame->frame_entries == frame_entry);
- assert (! frame->frame_entries->next);
- assert (frame_entry->shared_next == frame_entry);
- frame_entry_destroy (NULL, frame_entry, true);
- frame_entry_free (frame_entry);
- return 0;
-}
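
The scan frame_entry_deallocate performs over the remaining frame entries is easier to see in isolation.  The following stand-alone sketch (an illustration under assumed simplifications, not the removed physmem code; `struct ref' and `find_free_ranges' are made-up names) reproduces just the core loop: given the byte range being deallocated and the ranges other entries still reference, it reports the sub-ranges that nothing references and which could therefore be freed.

/* Illustrative sketch only -- not the removed code.  */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ref { uintptr_t offset; size_t size; };

static void
find_free_ranges (uintptr_t dealloc_start, size_t dealloc_len,
		  struct ref *refs, int nrefs)
{
  while (dealloc_len > 0)
    {
      /* Earliest (and, on ties, largest) referenced range overlapping the
	 region to deallocate; starting past the end means "unreferenced".  */
      uintptr_t refed_start = dealloc_start + dealloc_len;
      size_t refed_len = 0;

      for (int i = 0; i < nrefs; i ++)
	{
	  if (refs[i].offset + refs[i].size <= dealloc_start)
	    continue;		/* Ends before the region.  */
	  if (refs[i].offset >= dealloc_start + dealloc_len)
	    continue;		/* Starts after the region.  */
	  if (refs[i].offset < refed_start
	      || (refs[i].offset == refed_start && refs[i].size > refed_len))
	    {
	      refed_start = refs[i].offset;
	      refed_len = refs[i].size;
	    }
	}

      /* Nothing references [DEALLOC_START, REFED_START): freeable.  */
      if (dealloc_start < refed_start)
	printf ("free %#lx-%#lx\n",
		(unsigned long) dealloc_start, (unsigned long) refed_start);

      if (refed_start + refed_len >= dealloc_start + dealloc_len)
	break;

      dealloc_len -= refed_start + refed_len - dealloc_start;
      dealloc_start = refed_start + refed_len;
    }
}

int
main (void)
{
  /* The 16kb example from the comments above: everything is deallocated,
     but other entries still reference the middle 8kb as two 4kb pieces.
     The first and the last 4kb are reported as freeable.  */
  struct ref refs[] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 } };
  find_free_ranges (0, 0x4000, refs, 2);
  return 0;
}
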
diff --git a/physmem/frame.c b/physmem/frame.c
deleted file mode 100644
index cd87388..0000000
--- a/physmem/frame.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* frame.c - Frame management.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Neal H. Walfield <neal@gnu.org>.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with the GNU Hurd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <assert.h>
-#include <errno.h>
-#include <pthread.h>
-#include <l4.h>
-#include <hurd/btree.h>
-#include <hurd/slab.h>
-
-#include <compiler.h>
-
-#include "priv.h"
-#include "zalloc.h"
-
-void
-frame_dump (struct frame *frame)
-{
- struct frame_entry *fe;
-
- printf ("frame: %x (%d refs). memory: %x+%x\n",
- frame, frame->refs,
- l4_address (frame->memory), l4_size (frame->memory));
- printf ("Frame entries: ");
- for (fe = frame->frame_entries; fe; fe = fe->next)
- printf ("fe %x:%x+%x@%x on %x", fe,
- fe->region.start, fe->region.size, fe->frame_offset,
- fe->frame);
- printf ("\n");
-}
-
-static error_t
-frame_constructor (void *hook, struct frame *frame)
-{
- frame->refs = 1;
- pthread_mutex_init (&frame->lock, 0);
- pthread_mutex_lock (&frame->lock);
- frame->frame_entries = 0;
- frame->cow = 0;
-
- return 0;
-}
-
-SLAB_CLASS(frame, struct frame)
-
-static struct hurd_frame_slab_space frame_space;
-
-void
-frame_init (void)
-{
- hurd_frame_slab_init (&frame_space, NULL, NULL,
- frame_constructor, NULL, NULL);
-}
-
-struct frame *
-frame_alloc (size_t size)
-{
- error_t err;
- struct frame *frame;
-
- /* The size must be a power of 2. */
- assert ((size & (size - 1)) == 0);
-
- err = hurd_frame_slab_alloc (&frame_space, &frame);
- if (err)
- /* XXX: Free some memory and try again. */
- assert_perror (err);
-
- assert (frame->refs == 1);
- frame->memory = l4_fpage (0, size);
- frame->may_be_mapped = 0;
- assert (frame->cow == 0);
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
- assert (frame->frame_entries == 0);
-
- return frame;
-}
-
-/* Allocate the reserved physical memory for frame FRAME. */
-void
-frame_memory_alloc (struct frame *frame)
-{
- assert (! l4_address (frame->memory));
-
- frame->memory = l4_fpage (zalloc (l4_size (frame->memory)),
- l4_size (frame->memory));
- if (! l4_address (frame->memory))
- /* We already have the memory reservation, we just need to find
- it. zalloc may have failed for a number of reasons:
-
- - There is no contiguous block of memory frame->SIZE bytes
- currently in the pool.
- - We may need to reclaim extra frames
- */
- {
- /* XXX: For now we just bail. */
- assert (l4_address (frame->memory));
- }
-
- debug ("allocated physical memory: %x+%x\n",
- l4_address (frame->memory), l4_size (frame->memory));
-}
-
-void
-frame_deref (struct frame *frame)
-{
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
-
- if (EXPECT_FALSE (frame->refs == 1))
- /* Last reference. Deallocate this frame. */
- {
- /* There better not be any users. */
- assert (! frame->frame_entries);
-
- if (frame->may_be_mapped)
- {
- assert (l4_address (frame->memory));
- l4_unmap_fpage (l4_fpage_add_rights (frame->memory,
- L4_FPAGE_FULLY_ACCESSIBLE));
- }
-
- if (l4_address (frame->memory))
- zfree (l4_address (frame->memory), l4_size (frame->memory));
-
- assert (frame->frame_entries == 0);
- assert (frame->cow == 0);
-#ifndef NDEBUG
- frame->memory = l4_fpage (0xDEAD000, 0);
-#endif
-
- hurd_frame_slab_dealloc (&frame_space, frame);
- }
- else
- {
- frame->refs --;
- pthread_mutex_unlock (&frame->lock);
- }
-}
-
-void
-frame_add_user (struct frame *frame, struct frame_entry *frame_entry)
-{
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
- assert (frame->refs > 0);
-
- /* Add FRAME_ENTRY to the list of the users of FRAME. */
- frame_entry->next = frame->frame_entries;
- if (frame_entry->next)
- frame_entry->next->prevp = &frame_entry->next;
- frame_entry->prevp = &frame->frame_entries;
- frame->frame_entries = frame_entry;
-}
-
-void
-frame_drop_user (struct frame *frame, struct frame_entry *frame_entry)
-{
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
- assert (frame->refs > 0);
- assert (frame_entry->frame == frame);
-
- *frame_entry->prevp = frame_entry->next;
- if (frame_entry->next)
- frame_entry->next->prevp = frame_entry->prevp;
-}
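
frame_add_user and frame_drop_user rely on the pointer-to-pointer (`prevp') intrusive-list idiom: each node records the address of the pointer that points at it, so unlinking is O(1) and needs no special case for the list head.  A minimal stand-alone illustration (hypothetical `struct node' names, not the physmem types):

#include <assert.h>
#include <stddef.h>

struct node
{
  struct node *next;
  struct node **prevp;	/* Address of the pointer that points at us.  */
};

static void
list_add (struct node **head, struct node *n)
{
  n->next = *head;
  if (n->next)
    n->next->prevp = &n->next;
  n->prevp = head;
  *head = n;
}

static void
list_drop (struct node *n)
{
  *n->prevp = n->next;
  if (n->next)
    n->next->prevp = n->prevp;
}

int
main (void)
{
  struct node a, b, *head = NULL;
  list_add (&head, &a);
  list_add (&head, &b);	/* head -> b -> a */
  list_drop (&b);	/* O(1), even though B is currently the head.  */
  assert (head == &a && a.prevp == &head && a.next == NULL);
  return 0;
}
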
diff --git a/physmem/headers.m4 b/physmem/headers.m4
deleted file mode 100644
index ddb875c..0000000
--- a/physmem/headers.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-# headers.m4 - Autoconf snippets to install links for header files.
-# Copyright 2005 Free Software Foundation, Inc.
-# Written by Neal H. Walfield <neal@gnu.org>.
-#
-# This file is free software; as a special exception the author gives
-# unlimited permission to copy and/or distribute it, with or without
-# modifications, as long as this notice is preserved.
-#
-# This file is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-AC_CONFIG_LINKS([include/hurd/physmem.h:physmem/physmem.h])
diff --git a/physmem/ia32-cmain.c b/physmem/ia32-cmain.c
deleted file mode 100644
index 0dea3d1..0000000
--- a/physmem/ia32-cmain.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/* ia32-cmain.c - Startup code for the ia32.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <alloca.h>
-#include <stdint.h>
-
-#include <l4/globals.h>
-#include <l4/init.h>
-#include <l4/stubs.h>
-#include <l4/stubs-init.h>
-
-#include "priv.h"
-
-
-/* Initialize libl4, setup the argument vector, and pass control over
- to the main function. */
-void
-cmain (void)
-{
- int argc = 0;
- char **argv = 0;
-
- l4_init ();
- l4_init_stubs ();
-
- argc = 1;
- argv = alloca (sizeof (char *) * 2);
- argv[0] = program_name;
- argv[1] = 0;
-
- /* Now invoke the main function. */
- main (argc, argv);
-
- /* Never reached. */
-}
-
-
-#define __thread_stack_pointer() ({ \
- void *__sp__; \
- __asm__ ("movl %%esp, %0" : "=r" (__sp__)); \
- __sp__; \
-})
-
-
-#define __thread_set_stack_pointer(sp) ({ \
- __asm__ ("movl %0, %%esp" : : "r" (sp)); \
-})
-
-
-/* Switch execution transparently to thread TO. The thread FROM,
- which must be the current thread, will be halted. */
-void
-switch_thread (l4_thread_id_t from, l4_thread_id_t to)
-{
- void *current_stack;
- /* FIXME: Figure out how much we need. Probably only one return
- address. */
- char small_sub_stack[16];
- unsigned int i;
-
-/* FIXME: FROM is an argument to force gcc to evaluate it before the
- thread switch. Maybe this can be done better, but it's
- magical, so be careful. */
-
- /* Save the machine context. */
- __asm__ __volatile__ ("pusha");
- __asm__ __volatile__ ("pushf");
-
-  /* Start the TO thread.  It will eventually become a clone of our
- thread. */
- current_stack = __thread_stack_pointer ();
- l4_start_sp_ip (to, (l4_word_t) current_stack,
- (l4_word_t) &&thread_switch_entry);
-
- /* We need a bit of extra space on the stack for
- l4_thread_switch. */
- __thread_set_stack_pointer (small_sub_stack + sizeof (small_sub_stack));
-
- /* We can't use while(1), because then gcc will become clever and
- optimize away everything after thread_switch_entry. */
- for (i = 1; i; i++)
- l4_thread_switch (to);
-
- thread_switch_entry:
- /* Restore the machine context. */
- __asm__ __volatile__ ("popf");
- __asm__ __volatile__ ("popa");
-
- /* The thread TO continues here. */
- l4_stop (from);
-}
diff --git a/physmem/ia32-crt0.S b/physmem/ia32-crt0.S
deleted file mode 100644
index dddd992..0000000
--- a/physmem/ia32-crt0.S
+++ /dev/null
@@ -1,44 +0,0 @@
-/* ia32-crt0.S - Startup code for ia32.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-/* The size of our stack (4KB). */
-#define STACK_SIZE 0x1000
-
- .text
-
- .globl start, _start
-start:
-_start:
- /* Initialize the stack pointer. */
- movl $(stack + STACK_SIZE), %esp
-
- /* Reset EFLAGS. */
- pushl $0
- popf
-
- /* Now enter the cmain function. */
- call cmain
-
- /* Not reached. */
-loop: hlt
- jmp loop
-
- /* Our stack area. */
- .comm stack, STACK_SIZE
diff --git a/physmem/malloc-wrap.c b/physmem/malloc-wrap.c
deleted file mode 100644
index 31c66ea..0000000
--- a/physmem/malloc-wrap.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* malloc-wrap.c - Doug Lea's malloc for the physical memory server.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-/* Configuration of Doug Lea's malloc. */
-
-#include <errno.h>
-
-#include <l4.h>
-
-#define __STD_C 1
-#define LACKS_UNISTD_H
-#define LACKS_SYS_PARAM_H
-#define LACKS_FCNTL_H
-
-/* We want to use optimized versions of memset and memcpy. */
-#define HAVE_MEMCPY
-
-/* We always use the supplied mmap emulation. */
-#define MORECORE(x) MORECORE_FAILURE
-#define HAVE_MMAP 1
-#define HAVE_MREMAP 0
-#define MMAP_CLEARS 1
-#define malloc_getpagesize l4_min_page_size ()
-#define MMAP_AS_MORECORE_SIZE (16 * malloc_getpagesize)
-#define DEFAULT_MMAP_THRESHOLD (4 * malloc_getpagesize)
-#define USE_MALLOC_LOCK 1
-
-/* Suppress debug output in mstats(). */
-#define fprintf(...)
-
-/* Now include Doug Lea's malloc. */
-#include "malloc.c"
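
With MORECORE disabled and HAVE_MMAP set, every block dlmalloc hands out is obtained through mmap(), so the server has to supply mmap() and munmap() itself.  Purely as an illustration of what such an emulation involves (a hedged sketch, not the server's actual implementation; the zalloc/zfree declarations are assumptions modelled on the calls visible elsewhere in this diff):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

#define PAGE_SIZE 0x1000
#define MAP_FAILED ((void *) -1)

/* Assumed internal page allocator (cf. the zalloc/zfree calls above).  */
extern uintptr_t zalloc (size_t size);
extern void zfree (uintptr_t addr, size_t size);

void *
mmap (void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  /* Only anonymous, kernel-chosen mappings are needed by dlmalloc; the
     other parameters are ignored in this sketch.  */
  (void) addr; (void) prot; (void) flags; (void) fd; (void) offset;

  size_t size = (len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
  uintptr_t p = zalloc (size);
  if (! p)
    {
      errno = ENOMEM;
      return MAP_FAILED;
    }
  memset ((void *) p, 0, size);	/* MMAP_CLEARS expects zeroed memory.  */
  return (void *) p;
}

int
munmap (void *addr, size_t len)
{
  zfree ((uintptr_t) addr, (len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
  return 0;
}
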
diff --git a/physmem/malloc.c b/physmem/malloc.c
deleted file mode 100644
index ca9ca25..0000000
--- a/physmem/malloc.c
+++ /dev/null
@@ -1,5567 +0,0 @@
-/*
- This is a version (aka dlmalloc) of malloc/free/realloc written by
- Doug Lea and released to the public domain. Use, modify, and
- redistribute this code without permission or acknowledgement in any
- way you wish. Send questions, comments, complaints, performance
- data, etc to dl@cs.oswego.edu
-
-* VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
-
- Note: There may be an updated version of this malloc obtainable at
- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
- Check before installing!
-
-* Quickstart
-
- This library is all in one file to simplify the most common usage:
- ftp it, compile it (-O), and link it into another program. All
- of the compile-time options default to reasonable values for use on
- most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
- You might later want to step through various compile-time and dynamic
- tuning options.
-
- For convenience, an include file for code using this malloc is at:
- ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
- You don't really need this .h file unless you call functions not
- defined in your system include files. The .h file contains only the
- excerpts from this file needed for using this malloc on ANSI C/C++
- systems, so long as you haven't changed compile-time options about
- naming and tuning parameters. If you do, then you can create your
- own malloc.h that does include all settings by cutting at the point
- indicated below.
-
-* Why use this malloc?
-
- This is not the fastest, most space-conserving, most portable, or
- most tunable malloc ever written. However it is among the fastest
- while also being among the most space-conserving, portable and tunable.
- Consistent balance across these factors results in a good general-purpose
- allocator for malloc-intensive programs.
-
- The main properties of the algorithms are:
- * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
- with ties normally decided via FIFO (i.e. least recently used).
- * For small (<= 64 bytes by default) requests, it is a caching
- allocator, that maintains pools of quickly recycled chunks.
- * In between, and for combinations of large and small requests, it does
- the best it can trying to meet both goals at once.
- * For very large requests (>= 128KB by default), it relies on system
- memory mapping facilities, if supported.
-
- For a longer but slightly out of date high-level description, see
- http://gee.cs.oswego.edu/dl/html/malloc.html
-
- You may already by default be using a C library containing a malloc
- that is based on some version of this malloc (for example in
- linux). You might still want to use the one in this file in order to
- customize settings or to avoid overheads associated with library
- versions.
-
-* Contents, described in more detail in "description of public routines" below.
-
- Standard (ANSI/SVID/...) functions:
- malloc(size_t n);
- calloc(size_t n_elements, size_t element_size);
- free(Void_t* p);
- realloc(Void_t* p, size_t n);
- memalign(size_t alignment, size_t n);
- valloc(size_t n);
- mallinfo()
- mallopt(int parameter_number, int parameter_value)
-
- Additional functions:
- independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
- independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
- pvalloc(size_t n);
- cfree(Void_t* p);
- malloc_trim(size_t pad);
- malloc_usable_size(Void_t* p);
- malloc_stats();
-
-* Vital statistics:
-
- Supported pointer representation: 4 or 8 bytes
- Supported size_t representation: 4 or 8 bytes
- Note that size_t is allowed to be 4 bytes even if pointers are 8.
- You can adjust this by defining INTERNAL_SIZE_T
-
- Alignment: 2 * sizeof(size_t) (default)
- (i.e., 8 byte alignment with 4byte size_t). This suffices for
- nearly all current machines and C compilers. However, you can
- define MALLOC_ALIGNMENT to be wider than this if necessary.
-
- Minimum overhead per allocated chunk: 4 or 8 bytes
- Each malloced chunk has a hidden word of overhead holding size
- and status information.
-
- Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
-                          8-byte ptrs: 24/32 bytes (including 4/8 overhead)
-
- When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
- ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
- needed; 4 (8) for a trailing size field and 8 (16) bytes for
- free list pointers. Thus, the minimum allocatable size is
- 16/24/32 bytes.
-
- Even a request for zero bytes (i.e., malloc(0)) returns a
- pointer to something of the minimum allocatable size.
-
- The maximum overhead wastage (i.e., number of extra bytes
- allocated than were requested in malloc) is less than or equal
- to the minimum size, except for requests >= mmap_threshold that
- are serviced via mmap(), where the worst case wastage is 2 *
- sizeof(size_t) bytes plus the remainder from a system page (the
- minimal mmap unit); typically 4096 or 8192 bytes.
-
- Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
- 8-byte size_t: 2^64 minus about two pages
-
- It is assumed that (possibly signed) size_t values suffice to
- represent chunk sizes. `Possibly signed' is due to the fact
- that `size_t' may be defined on a system as either a signed or
- an unsigned type. The ISO C standard says that it must be
- unsigned, but a few systems are known not to adhere to this.
- Additionally, even when size_t is unsigned, sbrk (which is by
- default used to obtain memory from system) accepts signed
- arguments, and may not be able to handle size_t-wide arguments
- with negative sign bit. Generally, values that would
- appear as negative after accounting for overhead and alignment
- are supported only via mmap(), which does not have this
- limitation.
-
- Requests for sizes outside the allowed range will perform an optional
-  failure action and then return null. (Requests may
-  also fail because a system is out of memory.)
-
- Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
-
- When USE_MALLOC_LOCK is defined, wrappers are created to
- surround every public call with either a pthread mutex or
- a win32 spinlock (depending on WIN32). This is not
- especially fast, and can be a major bottleneck.
- It is designed only to provide minimal protection
- in concurrent environments, and to provide a basis for
- extensions. If you are using malloc in a concurrent program,
- you would be far better off obtaining ptmalloc, which is
- derived from a version of this malloc, and is well-tuned for
- concurrent programs. (See http://www.malloc.de) Note that
-  even when USE_MALLOC_LOCK is defined, you can guarantee
- full thread-safety only if no threads acquire memory through
- direct calls to MORECORE or other system-level allocators.
-
- Compliance: I believe it is compliant with the 1997 Single Unix Specification
- (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
- others as well.
-
-* Synopsis of compile-time options:
-
- People have reported using previous versions of this malloc on all
- versions of Unix, sometimes by tweaking some of the defines
- below. It has been tested most extensively on Solaris and
- Linux. It is also reported to work on WIN32 platforms.
- People also report using it in stand-alone embedded systems.
-
- The implementation is in straight, hand-tuned ANSI C. It is not
- at all modular. (Sorry!) It uses a lot of macros. To be at all
- usable, this code should be compiled using an optimizing compiler
- (for example gcc -O3) that can simplify expressions and control
- paths. (FAQ: some macros import variables as arguments rather than
- declare locals because people reported that some debuggers
- otherwise get confused.)
-
- OPTION DEFAULT VALUE
-
- Compilation Environment options:
-
- __STD_C derived from C compiler defines
- WIN32 NOT defined
- HAVE_MEMCPY defined
- USE_MEMCPY 1 if HAVE_MEMCPY is defined
- HAVE_MMAP defined as 1
- MMAP_CLEARS 1
- HAVE_MREMAP 0 unless linux defined
- malloc_getpagesize derived from system #includes, or 4096 if not
- HAVE_USR_INCLUDE_MALLOC_H NOT defined
- LACKS_UNISTD_H NOT defined unless WIN32
- LACKS_SYS_PARAM_H NOT defined unless WIN32
- LACKS_SYS_MMAN_H NOT defined unless WIN32
- LACKS_FCNTL_H NOT defined
-
- Changing default word sizes:
-
- INTERNAL_SIZE_T size_t
- MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
- PTR_UINT unsigned long
- CHUNK_SIZE_T unsigned long
-
- Configuration and functionality options:
-
- USE_DL_PREFIX NOT defined
- USE_PUBLIC_MALLOC_WRAPPERS NOT defined
- USE_MALLOC_LOCK NOT defined
- DEBUG NOT defined
- REALLOC_ZERO_BYTES_FREES NOT defined
- MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
- TRIM_FASTBINS 0
- FIRST_SORTED_BIN_SIZE 512
-
- Options for customizing MORECORE:
-
- MORECORE sbrk
- MORECORE_CONTIGUOUS 1
- MORECORE_CANNOT_TRIM NOT defined
- MMAP_AS_MORECORE_SIZE (1024 * 1024)
-
- Tuning options that are also dynamically changeable via mallopt:
-
- DEFAULT_MXFAST 64
- DEFAULT_TRIM_THRESHOLD 256 * 1024
- DEFAULT_TOP_PAD 0
- DEFAULT_MMAP_THRESHOLD 256 * 1024
- DEFAULT_MMAP_MAX 65536
-
- There are several other #defined constants and macros that you
- probably don't want to touch unless you are extending or adapting malloc.
-*/
-
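
A quick usage sketch of the public routines listed in the Contents section above (standard names, i.e. USE_DL_PREFIX not defined; it assumes the declarations come from the system's <malloc.h> or from the companion malloc.h mentioned above):

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int
main (void)
{
  void *p = malloc (1000);
  void *q = memalign (64, 1000);	/* 64-byte aligned block */

  printf ("usable size of p: %zu\n", malloc_usable_size (p));

  struct mallinfo mi = mallinfo ();
  printf ("allocated: %d bytes, free: %d bytes\n", mi.uordblks, mi.fordblks);

  free (p);
  free (q);
  malloc_trim (0);	/* return unused top-of-heap memory to the system */
  malloc_stats ();	/* prints a summary to stderr */
  return 0;
}
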
-/*
- WIN32 sets up defaults for MS environment and compilers.
- Otherwise defaults are for unix.
-*/
-
-/* #define WIN32 */
-
-#ifdef WIN32
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-/* Win32 doesn't supply or need the following headers */
-#define LACKS_UNISTD_H
-#define LACKS_SYS_PARAM_H
-#define LACKS_SYS_MMAN_H
-
-/* Use the supplied emulation of sbrk */
-#define MORECORE sbrk
-#define MORECORE_CONTIGUOUS 1
-#define MORECORE_FAILURE ((void*)(-1))
-
-/* Use the supplied emulation of mmap and munmap */
-#define HAVE_MMAP 1
-#define MUNMAP_FAILURE (-1)
-#define MMAP_CLEARS 1
-
-/* These values don't really matter in windows mmap emulation */
-#define MAP_PRIVATE 1
-#define MAP_ANONYMOUS 2
-#define PROT_READ 1
-#define PROT_WRITE 2
-
-/* Emulation functions defined at the end of this file */
-
-/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
-#ifdef USE_MALLOC_LOCK
-static int slwait(int *sl);
-static int slrelease(int *sl);
-#endif
-
-static long getpagesize(void);
-static long getregionsize(void);
-static void *sbrk(long size);
-static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
-static long munmap(void *ptr, long size);
-
-static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed);
-static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user);
-
-#endif
-
-/*
- __STD_C should be nonzero if using ANSI-standard C compiler, a C++
- compiler, or a C compiler sufficiently close to ANSI to get away
- with it.
-*/
-
-#ifndef __STD_C
-#if defined(__STDC__) || defined(_cplusplus)
-#define __STD_C 1
-#else
-#define __STD_C 0
-#endif
-#endif /*__STD_C*/
-
-
-/*
- Void_t* is the pointer type that malloc should say it returns
-*/
-
-#ifndef Void_t
-#if (__STD_C || defined(WIN32))
-#define Void_t void
-#else
-#define Void_t char
-#endif
-#endif /*Void_t*/
-
-#if __STD_C
-#include <stddef.h> /* for size_t */
-#else
-#include <sys/types.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
-
-/* #define LACKS_UNISTD_H */
-
-#ifndef LACKS_UNISTD_H
-#include <unistd.h>
-#endif
-
-/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
-
-/* #define LACKS_SYS_PARAM_H */
-
-
-#include <stdio.h> /* needed for malloc_stats */
-#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
-
-
-/*
- Debugging:
-
- Because freed chunks may be overwritten with bookkeeping fields, this
- malloc will often die when freed memory is overwritten by user
- programs. This can be very effective (albeit in an annoying way)
- in helping track down dangling pointers.
-
- If you compile with -DDEBUG, a number of assertion checks are
- enabled that will catch more memory errors. You probably won't be
- able to make much sense of the actual assertion errors, but they
- should help you locate incorrectly overwritten memory. The
- checking is fairly extensive, and will slow down execution
- noticeably. Calling malloc_stats or mallinfo with DEBUG set will
- attempt to check every non-mmapped allocated and free chunk in the
-  course of computing the summaries. (By nature, mmapped regions
- cannot be checked very much automatically.)
-
- Setting DEBUG may also be helpful if you are trying to modify
- this code. The assertions in the check routines spell out in more
- detail the assumptions and invariants underlying the algorithms.
-
- Setting DEBUG does NOT provide an automated mechanism for checking
- that all accesses to malloced memory stay within their
- bounds. However, there are several add-ons and adaptations of this
- or other mallocs available that do this.
-*/
-
-#if DEBUG
-#include <assert.h>
-#else
-#define assert(x) ((void)0)
-#endif
-
-/*
- The unsigned integer type used for comparing any two chunk sizes.
- This should be at least as wide as size_t, but should not be signed.
-*/
-
-#ifndef CHUNK_SIZE_T
-#define CHUNK_SIZE_T unsigned long
-#endif
-
-/*
-  The unsigned integer type used to hold addresses when they are
- manipulated as integers. Except that it is not defined on all
- systems, intptr_t would suffice.
-*/
-#ifndef PTR_UINT
-#define PTR_UINT unsigned long
-#endif
-
-
-/*
- INTERNAL_SIZE_T is the word-size used for internal bookkeeping
- of chunk sizes.
-
- The default version is the same as size_t.
-
- While not strictly necessary, it is best to define this as an
- unsigned type, even if size_t is a signed type. This may avoid some
- artificial size limitations on some systems.
-
- On a 64-bit machine, you may be able to reduce malloc overhead by
- defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
- expense of not being able to handle more than 2^32 of malloced
- space. If this limitation is acceptable, you are encouraged to set
- this unless you are on a platform requiring 16byte alignments. In
- this case the alignment requirements turn out to negate any
- potential advantages of decreasing size_t word size.
-
- Implementors: Beware of the possible combinations of:
- - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
- and might be the same width as int or as long
- - size_t might have different width and signedness as INTERNAL_SIZE_T
- - int and long might be 32 or 64 bits, and might be the same width
- To deal with this, most comparisons and difference computations
- among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
- aware of the fact that casting an unsigned int to a wider long does
- not sign-extend. (This also makes checking for negative numbers
- awkward.) Some of these casts result in harmless compiler warnings
- on some systems.
-*/
-
-#ifndef INTERNAL_SIZE_T
-#define INTERNAL_SIZE_T size_t
-#endif
-
-/* The corresponding word size */
-#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
-
-
-
-/*
- MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
- It must be a power of two at least 2 * SIZE_SZ, even on machines
- for which smaller alignments would suffice. It may be defined as
- larger than this though. Note however that code and data structures
- are optimized for the case of 8-byte alignment.
-*/
-
-
-#ifndef MALLOC_ALIGNMENT
-#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
-#endif
-
-/* The corresponding bit mask value */
-#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
-
-
-
-/*
- REALLOC_ZERO_BYTES_FREES should be set if a call to
- realloc with zero bytes should be the same as a call to free.
- Some people think it should. Otherwise, since this malloc
- returns a unique pointer for malloc(0), so does realloc(p, 0).
-*/
-
-/* #define REALLOC_ZERO_BYTES_FREES */
-
-/*
- TRIM_FASTBINS controls whether free() of a very small chunk can
- immediately lead to trimming. Setting to true (1) can reduce memory
- footprint, but will almost always slow down programs that use a lot
- of small chunks.
-
- Define this only if you are willing to give up some speed to more
- aggressively reduce system-level memory footprint when releasing
- memory in programs that use many small chunks. You can get
- essentially the same effect by setting MXFAST to 0, but this can
- lead to even greater slowdowns in programs using many small chunks.
- TRIM_FASTBINS is an in-between compile-time option, that disables
- only those chunks bordering topmost memory from being placed in
- fastbins.
-*/
-
-#ifndef TRIM_FASTBINS
-#define TRIM_FASTBINS 0
-#endif
-
-
-/*
- USE_DL_PREFIX will prefix all public routines with the string 'dl'.
- This is necessary when you only want to use this malloc in one part
- of a program, using your regular system malloc elsewhere.
-*/
-
-/* #define USE_DL_PREFIX */
-
-
-/*
- USE_MALLOC_LOCK causes wrapper functions to surround each
- callable routine with pthread mutex lock/unlock.
-
- USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
-*/
-
-
-/* #define USE_MALLOC_LOCK */
-
-
-/*
- If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
- actually a wrapper function that first calls MALLOC_PREACTION, then
- calls the internal routine, and follows it with
- MALLOC_POSTACTION. This is needed for locking, but you can also use
- this, without USE_MALLOC_LOCK, for purposes of interception,
- instrumentation, etc. It is a sad fact that using wrappers often
- noticeably degrades performance of malloc-intensive programs.
-*/
-
-#ifdef USE_MALLOC_LOCK
-#define USE_PUBLIC_MALLOC_WRAPPERS
-#else
-/* #define USE_PUBLIC_MALLOC_WRAPPERS */
-#endif
-
-
-/*
- Two-phase name translation.
- All of the actual routines are given mangled names.
- When wrappers are used, they become the public callable versions.
- When DL_PREFIX is used, the callable names are prefixed.
-*/
-
-#ifndef USE_PUBLIC_MALLOC_WRAPPERS
-#define cALLOc public_cALLOc
-#define fREe public_fREe
-#define cFREe public_cFREe
-#define mALLOc public_mALLOc
-#define mEMALIGn public_mEMALIGn
-#define rEALLOc public_rEALLOc
-#define vALLOc public_vALLOc
-#define pVALLOc public_pVALLOc
-#define mALLINFo public_mALLINFo
-#define mALLOPt public_mALLOPt
-#define mTRIm public_mTRIm
-#define mSTATs public_mSTATs
-#define mUSABLe public_mUSABLe
-#define iCALLOc public_iCALLOc
-#define iCOMALLOc public_iCOMALLOc
-#endif
-
-#ifdef USE_DL_PREFIX
-#define public_cALLOc dlcalloc
-#define public_fREe dlfree
-#define public_cFREe dlcfree
-#define public_mALLOc dlmalloc
-#define public_mEMALIGn dlmemalign
-#define public_rEALLOc dlrealloc
-#define public_vALLOc dlvalloc
-#define public_pVALLOc dlpvalloc
-#define public_mALLINFo dlmallinfo
-#define public_mALLOPt dlmallopt
-#define public_mTRIm dlmalloc_trim
-#define public_mSTATs dlmalloc_stats
-#define public_mUSABLe dlmalloc_usable_size
-#define public_iCALLOc dlindependent_calloc
-#define public_iCOMALLOc dlindependent_comalloc
-#else /* USE_DL_PREFIX */
-#define public_cALLOc calloc
-#define public_fREe free
-#define public_cFREe cfree
-#define public_mALLOc malloc
-#define public_mEMALIGn memalign
-#define public_rEALLOc realloc
-#define public_vALLOc valloc
-#define public_pVALLOc pvalloc
-#define public_mALLINFo mallinfo
-#define public_mALLOPt mallopt
-#define public_mTRIm malloc_trim
-#define public_mSTATs malloc_stats
-#define public_mUSABLe malloc_usable_size
-#define public_iCALLOc independent_calloc
-#define public_iCOMALLOc independent_comalloc
-#endif /* USE_DL_PREFIX */
-
-
-/*
- HAVE_MEMCPY should be defined if you are not otherwise using
- ANSI STD C, but still have memcpy and memset in your C library
- and want to use them in calloc and realloc. Otherwise simple
- macro versions are defined below.
-
- USE_MEMCPY should be defined as 1 if you actually want to
- have memset and memcpy called. People report that the macro
- versions are faster than libc versions on some systems.
-
- Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
- (of <= 36 bytes) are manually unrolled in realloc and calloc.
-*/
-
-#define HAVE_MEMCPY
-
-#ifndef USE_MEMCPY
-#ifdef HAVE_MEMCPY
-#define USE_MEMCPY 1
-#else
-#define USE_MEMCPY 0
-#endif
-#endif
-
-
-#if (__STD_C || defined(HAVE_MEMCPY))
-
-#ifdef WIN32
-/* On Win32 memset and memcpy are already declared in windows.h */
-#else
-#if __STD_C
-void* memset(void*, int, size_t);
-void* memcpy(void*, const void*, size_t);
-#else
-Void_t* memset();
-Void_t* memcpy();
-#endif
-#endif
-#endif
-
-/*
- MALLOC_FAILURE_ACTION is the action to take before "return 0" when
- malloc fails to be able to return memory, either because memory is
- exhausted or because of illegal arguments.
-
- By default, sets errno if running on STD_C platform, else does nothing.
-*/
-
-#ifndef MALLOC_FAILURE_ACTION
-#if __STD_C
-#define MALLOC_FAILURE_ACTION \
- errno = ENOMEM;
-
-#else
-#define MALLOC_FAILURE_ACTION
-#endif
-#endif
-
-/*
- MORECORE-related declarations. By default, rely on sbrk
-*/
-
-
-#ifdef LACKS_UNISTD_H
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
-#if __STD_C
-extern Void_t* sbrk(ptrdiff_t);
-#else
-extern Void_t* sbrk();
-#endif
-#endif
-#endif
-
-/*
- MORECORE is the name of the routine to call to obtain more memory
- from the system. See below for general guidance on writing
- alternative MORECORE functions, as well as a version for WIN32 and a
- sample version for pre-OSX macos.
-*/
-
-#ifndef MORECORE
-#define MORECORE sbrk
-#endif
-
-/*
- MORECORE_FAILURE is the value returned upon failure of MORECORE
- as well as mmap. Since it cannot be an otherwise valid memory address,
- and must reflect values of standard sys calls, you probably ought not
- try to redefine it.
-*/
-
-#ifndef MORECORE_FAILURE
-#define MORECORE_FAILURE (-1)
-#endif
-
-/*
- If MORECORE_CONTIGUOUS is true, take advantage of fact that
- consecutive calls to MORECORE with positive arguments always return
- contiguous increasing addresses. This is true of unix sbrk. Even
- if not defined, when regions happen to be contiguous, malloc will
- permit allocations spanning regions obtained from different
- calls. But defining this when applicable enables some stronger
- consistency checks and space efficiencies.
-*/
-
-#ifndef MORECORE_CONTIGUOUS
-#define MORECORE_CONTIGUOUS 1
-#endif
-
-/*
- Define MORECORE_CANNOT_TRIM if your version of MORECORE
- cannot release space back to the system when given negative
- arguments. This is generally necessary only if you are using
- a hand-crafted MORECORE function that cannot handle negative arguments.
-*/
-
-/* #define MORECORE_CANNOT_TRIM */
-
-
-/*
- Define HAVE_MMAP as true to optionally make malloc() use mmap() to
- allocate very large blocks. These will be returned to the
- operating system immediately after a free(). Also, if mmap
- is available, it is used as a backup strategy in cases where
- MORECORE fails to provide space from system.
-
- This malloc is best tuned to work with mmap for large requests.
- If you do not have mmap, operations involving very large chunks (1MB
- or so) may be slower than you'd like.
-*/
-
-#ifndef HAVE_MMAP
-#define HAVE_MMAP 1
-#endif
-
-#if HAVE_MMAP
-/*
- Standard unix mmap using /dev/zero clears memory so calloc doesn't
- need to.
-*/
-
-#ifndef MMAP_CLEARS
-#define MMAP_CLEARS 1
-#endif
-
-#else /* no mmap */
-#ifndef MMAP_CLEARS
-#define MMAP_CLEARS 0
-#endif
-#endif
-
-
-/*
- MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
- sbrk fails, and mmap is used as a backup (which is done only if
- HAVE_MMAP). The value must be a multiple of page size. This
- backup strategy generally applies only when systems have "holes" in
- address space, so sbrk cannot perform contiguous expansion, but
- there is still space available on system. On systems for which
- this is known to be useful (i.e. most linux kernels), this occurs
- only when programs allocate huge amounts of memory. Between this,
- and the fact that mmap regions tend to be limited, the size should
- be large, to avoid too many mmap calls and thus avoid running out
- of kernel resources.
-*/
-
-#ifndef MMAP_AS_MORECORE_SIZE
-#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
-#endif
-
-/*
- Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
- large blocks. This is currently only possible on Linux with
- kernel versions newer than 1.3.77.
-*/
-
-#ifndef HAVE_MREMAP
-#ifdef linux
-#define HAVE_MREMAP 1
-#else
-#define HAVE_MREMAP 0
-#endif
-
-#endif /* HAVE_MREMAP */
-
-
-/*
- The system page size. To the extent possible, this malloc manages
- memory from the system in page-size units. Note that this value is
- cached during initialization into a field of malloc_state. So even
- if malloc_getpagesize is a function, it is only called once.
-
- The following mechanics for getpagesize were adapted from bsd/gnu
- getpagesize.h. If none of the system probes here apply, a value of
- 4096 is used, which should be OK: on systems where they don't apply,
- using the actual page size probably doesn't impact performance much
- anyway.
-*/
-
-
-#ifndef malloc_getpagesize
-
-#ifndef LACKS_UNISTD_H
-# include <unistd.h>
-#endif
-
-# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
-# ifndef _SC_PAGE_SIZE
-# define _SC_PAGE_SIZE _SC_PAGESIZE
-# endif
-# endif
-
-# ifdef _SC_PAGE_SIZE
-# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
-# else
-# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
- extern size_t getpagesize();
-# define malloc_getpagesize getpagesize()
-# else
-# ifdef WIN32 /* use supplied emulation of getpagesize */
-# define malloc_getpagesize getpagesize()
-# else
-# ifndef LACKS_SYS_PARAM_H
-# include <sys/param.h>
-# endif
-# ifdef EXEC_PAGESIZE
-# define malloc_getpagesize EXEC_PAGESIZE
-# else
-# ifdef NBPG
-# ifndef CLSIZE
-# define malloc_getpagesize NBPG
-# else
-# define malloc_getpagesize (NBPG * CLSIZE)
-# endif
-# else
-# ifdef NBPC
-# define malloc_getpagesize NBPC
-# else
-# ifdef PAGESIZE
-# define malloc_getpagesize PAGESIZE
-# else /* just guess */
-# define malloc_getpagesize (4096)
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-#endif
-
-/*
- This version of malloc supports the standard SVID/XPG mallinfo
- routine that returns a struct containing usage properties and
- statistics. It should work on any SVID/XPG compliant system that has
- a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
- install such a thing yourself, cut out the preliminary declarations
- as described above and below and save them in a malloc.h file. But
- there's no compelling reason to bother to do this.)
-
- The main declaration needed is the mallinfo struct that is returned
- (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
- bunch of fields that are not even meaningful in this version of
- malloc. These fields are instead filled by mallinfo() with
- other numbers that might be of interest.
-
- HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
- /usr/include/malloc.h file that includes a declaration of struct
- mallinfo. If so, it is included; else an SVID2/XPG2 compliant
- version is declared below. These must be precisely the same for
- mallinfo() to work. The original SVID version of this struct,
- defined on most systems with mallinfo, declares all fields as
- ints. But some others define as unsigned long. If your system
- defines the fields using a type of different width than listed here,
- you must #include your system version and #define
- HAVE_USR_INCLUDE_MALLOC_H.
-*/
-
-/* #define HAVE_USR_INCLUDE_MALLOC_H */
-
-#ifdef HAVE_USR_INCLUDE_MALLOC_H
-#include "/usr/include/malloc.h"
-#else
-
-/* SVID2/XPG mallinfo structure */
-
-struct mallinfo {
- int arena; /* non-mmapped space allocated from system */
- int ordblks; /* number of free chunks */
- int smblks; /* number of fastbin blocks */
- int hblks; /* number of mmapped regions */
- int hblkhd; /* space in mmapped regions */
- int usmblks; /* maximum total allocated space */
- int fsmblks; /* space available in freed fastbin blocks */
- int uordblks; /* total allocated space */
- int fordblks; /* total free space */
- int keepcost; /* top-most, releasable (via malloc_trim) space */
-};
-
-/*
- SVID/XPG defines four standard parameter numbers for mallopt,
- normally defined in malloc.h. Only one of these (M_MXFAST) is used
- in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
- so setting them has no effect. But this malloc also supports other
- options in mallopt described below.
-*/
-#endif
-
-
-/* ---------- description of public routines ------------ */
-
-/*
- malloc(size_t n)
- Returns a pointer to a newly allocated chunk of at least n bytes, or null
- if no space is available. Additionally, on failure, errno is
- set to ENOMEM on ANSI C systems.
-
- If n is zero, malloc returns a minimum-sized chunk. (The minimum
- size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
- systems.) On most systems, size_t is an unsigned type, so calls
- with negative arguments are interpreted as requests for huge amounts
- of space, which will often fail. The maximum supported value of n
- differs across systems, but is in all cases less than the maximum
- representable value of a size_t.
-*/
-#if __STD_C
-Void_t* public_mALLOc(size_t);
-#else
-Void_t* public_mALLOc();
-#endif
-
-/*
- free(Void_t* p)
- Releases the chunk of memory pointed to by p, that had been previously
- allocated using malloc or a related routine such as realloc.
- It has no effect if p is null. It can have arbitrary (i.e., bad!)
- effects if p has already been freed.
-
- Unless disabled (using mallopt), freeing very large spaces will,
- when possible, automatically trigger operations that give
- back unused memory to the system, thus reducing program footprint.
-*/
-#if __STD_C
-void public_fREe(Void_t*);
-#else
-void public_fREe();
-#endif
-
-/*
- calloc(size_t n_elements, size_t element_size);
- Returns a pointer to n_elements * element_size bytes, with all locations
- set to zero.
-*/
-#if __STD_C
-Void_t* public_cALLOc(size_t, size_t);
-#else
-Void_t* public_cALLOc();
-#endif
-
-/*
- realloc(Void_t* p, size_t n)
- Returns a pointer to a chunk of size n that contains the same data
- as does chunk p up to the minimum of (n, p's size) bytes, or null
- if no space is available.
-
- The returned pointer may or may not be the same as p. The algorithm
- prefers extending p when possible, otherwise it employs the
- equivalent of a malloc-copy-free sequence.
-
- If p is null, realloc is equivalent to malloc.
-
- If space is not available, realloc returns null, errno is set (if on
- ANSI) and p is NOT freed.
-
- If n is for fewer bytes than already held by p, the newly unused
- space is lopped off and freed if possible. Unless the #define
- REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
- zero (re)allocates a minimum-sized chunk.
-
- Large chunks that were internally obtained via mmap will always
- be reallocated using malloc-copy-free sequences unless
- the system supports MREMAP (currently only linux).
-
- The old unix realloc convention of allowing the last-free'd chunk
- to be used as an argument to realloc is not supported.
-*/
-#if __STD_C
-Void_t* public_rEALLOc(Void_t*, size_t);
-#else
-Void_t* public_rEALLOc();
-#endif
-
-/*
- memalign(size_t alignment, size_t n);
- Returns a pointer to a newly allocated chunk of n bytes, aligned
- in accord with the alignment argument.
-
- The alignment argument should be a power of two. If the argument is
- not a power of two, the nearest greater power is used.
- 8-byte alignment is guaranteed by normal malloc calls, so don't
- bother calling memalign with an argument of 8 or less.
-
- Overreliance on memalign is a sure way to fragment space.
-*/
-#if __STD_C
-Void_t* public_mEMALIGn(size_t, size_t);
-#else
-Void_t* public_mEMALIGn();
-#endif
-
-/*
- valloc(size_t n);
- Equivalent to memalign(pagesize, n), where pagesize is the page
- size of the system. If the pagesize is unknown, 4096 is used.
-*/
-#if __STD_C
-Void_t* public_vALLOc(size_t);
-#else
-Void_t* public_vALLOc();
-#endif
-
-
-
-/*
- mallopt(int parameter_number, int parameter_value)
- Sets tunable parameters. The format is to provide a
- (parameter-number, parameter-value) pair. mallopt then sets the
- corresponding parameter to the argument value if it can (i.e., so
- long as the value is meaningful), and returns 1 if successful else
- 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
- normally defined in malloc.h. Only one of these (M_MXFAST) is used
- in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
- so setting them has no effect. But this malloc also supports four
- other options in mallopt. See below for details. Briefly, supported
- parameters are as follows (listed defaults are for "typical"
- configurations).
-
- Symbol param # default allowed param values
- M_MXFAST 1 64 0-80 (0 disables fastbins)
- M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
- M_TOP_PAD -2 0 any
- M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
- M_MMAP_MAX -4 65536 any (0 disables use of mmap)
-*/
-#if __STD_C
-int public_mALLOPt(int, int);
-#else
-int public_mALLOPt();
-#endif
-
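-/*
-  Illustrative sketch (added; not part of the original commentary):
-  a long-lived program that allocates and frees large buffers might
-  tune the parameters above at startup, assuming the default public
-  names (mallopt and the M_* constants defined in this file):
-
-    void tune_allocator() {
-      mallopt(M_MXFAST, 0);                 // disable fastbins entirely
-      mallopt(M_TRIM_THRESHOLD, 128*1024);  // give memory back to the OS sooner
-      mallopt(M_MMAP_THRESHOLD, 192*1024);  // mmap chunks of 192K and up
-    }
-
-  The particular values are only examples; see the mallopt tuning
-  section below for guidance.
-*/
-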
-
-/*
- mallinfo()
- Returns (by copy) a struct containing various summary statistics:
-
- arena: current total non-mmapped bytes allocated from system
- ordblks: the number of free chunks
- smblks: the number of fastbin blocks (i.e., small chunks that
- have been freed but not yet reused or consolidated)
- hblks: current number of mmapped regions
- hblkhd: total bytes held in mmapped regions
- usmblks: the maximum total allocated space. This will be greater
- than current total if trimming has occurred.
- fsmblks: total bytes held in fastbin blocks
- uordblks: current total allocated space (normal or mmapped)
- fordblks: total free space
- keepcost: the maximum number of bytes that could ideally be released
- back to system via malloc_trim. ("ideally" means that
- it ignores page restrictions etc.)
-
- Because these fields are ints, but internal bookkeeping may
- be kept as longs, the reported values may wrap around zero and
- thus be inaccurate.
-*/
-#if __STD_C
-struct mallinfo public_mALLINFo(void);
-#else
-struct mallinfo public_mALLINFo();
-#endif
-
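-/*
-  Illustrative sketch (added; not part of the original commentary):
-  printing a few of the fields described above, assuming <stdio.h>
-  is available:
-
-    void report_heap() {
-      struct mallinfo mi = mallinfo();
-      printf("allocated: %d  free: %d  mmapped: %d\n",
-             mi.uordblks, mi.fordblks, mi.hblkhd);
-    }
-*/
-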
-/*
- independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
-
- independent_calloc is similar to calloc, but instead of returning a
- single cleared space, it returns an array of pointers to n_elements
- independent elements that can hold contents of size elem_size, each
- of which starts out cleared, and can be independently freed,
- realloc'ed etc. The elements are guaranteed to be adjacently
- allocated (this is not guaranteed to occur with multiple callocs or
- mallocs), which may also improve cache locality in some
- applications.
-
- The "chunks" argument is optional (i.e., may be null, which is
- probably the most typical usage). If it is null, the returned array
- is itself dynamically allocated and should also be freed when it is
- no longer needed. Otherwise, the chunks array must be of at least
- n_elements in length. It is filled in with the pointers to the
- chunks.
-
- In either case, independent_calloc returns this pointer array, or
- null if the allocation failed. If n_elements is zero and "chunks"
- is null, it returns a chunk representing an array with zero elements
- (which should be freed if not wanted).
-
- Each element must be individually freed when it is no longer
- needed. If you'd like to instead be able to free all at once, you
- should instead use regular calloc and assign pointers into this
- space to represent elements. (In this case though, you cannot
- independently free elements.)
-
- independent_calloc simplifies and speeds up implementations of many
- kinds of pools. It may also be useful when constructing large data
- structures that initially have a fixed number of fixed-sized nodes,
- but the number is not known at compile time, and some of the nodes
- may later need to be freed. For example:
-
- struct Node { int item; struct Node* next; };
-
- struct Node* build_list() {
- struct Node** pool;
- int i;
- int n = read_number_of_nodes_needed();
- if (n <= 0) return 0;
- pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
- if (pool == 0) die();
- // organize into a linked list...
- struct Node* first = pool[0];
- for (i = 0; i < n-1; ++i)
- pool[i]->next = pool[i+1];
- free(pool); // Can now free the array (or not, if it is needed later)
- return first;
- }
-*/
-#if __STD_C
-Void_t** public_iCALLOc(size_t, size_t, Void_t**);
-#else
-Void_t** public_iCALLOc();
-#endif
-
-/*
- independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
-
- independent_comalloc allocates, all at once, a set of n_elements
- chunks with sizes indicated in the "sizes" array. It returns
- an array of pointers to these elements, each of which can be
- independently freed, realloc'ed etc. The elements are guaranteed to
- be adjacently allocated (this is not guaranteed to occur with
- multiple callocs or mallocs), which may also improve cache locality
- in some applications.
-
- The "chunks" argument is optional (i.e., may be null). If it is null
- the returned array is itself dynamically allocated and should also
- be freed when it is no longer needed. Otherwise, the chunks array
- must be of at least n_elements in length. It is filled in with the
- pointers to the chunks.
-
- In either case, independent_comalloc returns this pointer array, or
- null if the allocation failed. If n_elements is zero and chunks is
- null, it returns a chunk representing an array with zero elements
- (which should be freed if not wanted).
-
- Each element must be individually freed when it is no longer
- needed. If you'd like to instead be able to free all at once, you
- should instead use a single regular malloc, and assign pointers at
- particular offsets in the aggregate space. (In this case though, you
- cannot independently free elements.)
-
- independent_comalloc differs from independent_calloc in that each
- element may have a different size, and also that it does not
- automatically clear elements.
-
- independent_comalloc can be used to speed up allocation in cases
- where several structs or objects must always be allocated at the
- same time. For example:
-
- struct Head { ... }
- struct Foot { ... }
-
- void send_message(char* msg) {
- int msglen = strlen(msg);
- size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
- void* chunks[3];
- if (independent_comalloc(3, sizes, chunks) == 0)
- die();
- struct Head* head = (struct Head*)(chunks[0]);
- char* body = (char*)(chunks[1]);
- struct Foot* foot = (struct Foot*)(chunks[2]);
- // ...
- }
-
- In general though, independent_comalloc is worth using only for
- larger values of n_elements. For small values, you probably won't
- detect enough difference from series of malloc calls to bother.
-
- Overuse of independent_comalloc can increase overall memory usage,
- since it cannot reuse existing noncontiguous small chunks that
- might be available for some of the elements.
-*/
-#if __STD_C
-Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
-#else
-Void_t** public_iCOMALLOc();
-#endif
-
-
-/*
- pvalloc(size_t n);
- Equivalent to valloc(minimum-page-that-holds(n)), that is,
- round up n to nearest pagesize.
- */
-#if __STD_C
-Void_t* public_pVALLOc(size_t);
-#else
-Void_t* public_pVALLOc();
-#endif
-
-/*
- cfree(Void_t* p);
- Equivalent to free(p).
-
- cfree is needed/defined on some systems that pair it with calloc,
- for odd historical reasons (such as: cfree is used in example
- code in the first edition of K&R).
-*/
-#if __STD_C
-void public_cFREe(Void_t*);
-#else
-void public_cFREe();
-#endif
-
-/*
- malloc_trim(size_t pad);
-
- If possible, gives memory back to the system (via negative
- arguments to sbrk) if there is unused memory at the `high' end of
- the malloc pool. You can call this after freeing large blocks of
- memory to potentially reduce the system-level memory requirements
- of a program. However, it cannot guarantee to reduce memory. Under
- some allocation patterns, some large free blocks of memory will be
- locked between two used chunks, so they cannot be given back to
- the system.
-
- The `pad' argument to malloc_trim represents the amount of free
- trailing space to leave untrimmed. If this argument is zero,
- only the minimum amount of memory to maintain internal data
- structures will be left (one page or less). Non-zero arguments
- can be supplied to maintain enough trailing space to service
- future expected allocations without having to re-obtain memory
- from the system.
-
- Malloc_trim returns 1 if it actually released any memory, else 0.
- On systems that do not support "negative sbrks", it will always
- return 0.
-*/
-#if __STD_C
-int public_mTRIm(size_t);
-#else
-int public_mTRIm();
-#endif
-
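-/*
-  Illustrative sketch (added; not part of the original commentary):
-  a server that has just freed a large batch of buffers might release
-  the unused tail of the heap while keeping 64K of slack for future
-  allocations:
-
-    void after_big_batch() {
-      if (malloc_trim(64 * 1024))
-        ;  // some memory was actually returned via a negative sbrk
-    }
-*/
-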
-/*
- malloc_usable_size(Void_t* p);
-
- Returns the number of bytes you can actually use in
- an allocated chunk, which may be more than you requested (although
- often not) due to alignment and minimum size constraints.
- You can use this many bytes without worrying about
- overwriting other allocated objects. This is not a particularly great
- programming practice. malloc_usable_size can be more useful in
- debugging and assertions, for example:
-
- p = malloc(n);
- assert(malloc_usable_size(p) >= 256);
-
-*/
-#if __STD_C
-size_t public_mUSABLe(Void_t*);
-#else
-size_t public_mUSABLe();
-#endif
-
-/*
- malloc_stats();
- Prints on stderr the amount of space obtained from the system (both
- via sbrk and mmap), the maximum amount (which may be more than
- current if malloc_trim and/or munmap got called), and the current
- number of bytes allocated via malloc (or realloc, etc) but not yet
- freed. Note that this is the number of bytes allocated, not the
- number requested. It will be larger than the number requested
- because of alignment and bookkeeping overhead. Because it includes
- alignment wastage as being in use, this figure may be greater than
- zero even when no user-level chunks are allocated.
-
- The reported current and maximum system memory can be inaccurate if
- a program makes other calls to system memory allocation functions
- (normally sbrk) outside of malloc.
-
- malloc_stats prints only the most commonly interesting statistics.
- More information can be obtained by calling mallinfo.
-
-*/
-#if __STD_C
-void public_mSTATs();
-#else
-void public_mSTATs();
-#endif
-
-/* mallopt tuning options */
-
-/*
- M_MXFAST is the maximum request size used for "fastbins", special bins
- that hold returned chunks without consolidating their spaces. This
- enables future requests for chunks of the same size to be handled
- very quickly, but can increase fragmentation, and thus increase the
- overall memory footprint of a program.
-
- This malloc manages fastbins very conservatively yet still
- efficiently, so fragmentation is rarely a problem for values less
- than or equal to the default. The maximum supported value of MXFAST
- is 80. You wouldn't want it any higher than this anyway. Fastbins
- are designed especially for use with many small structs, objects or
- strings -- the default handles structs/objects/arrays with sizes up
- to 16 4byte fields, or small strings representing words, tokens,
- etc. Using fastbins for larger objects normally worsens
- fragmentation without improving speed.
-
- M_MXFAST is set in REQUEST size units. It is internally used in
- chunksize units, which adds padding and alignment. You can reduce
- M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
- algorithm to be a closer approximation of fifo-best-fit in all cases,
- not just for larger requests, but will generally cause it to be
- slower.
-*/
-
-
-/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
-#ifndef M_MXFAST
-#define M_MXFAST 1
-#endif
-
-#ifndef DEFAULT_MXFAST
-#define DEFAULT_MXFAST 64
-#endif
-
-
-/*
- M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
- to keep before releasing via malloc_trim in free().
-
- Automatic trimming is mainly useful in long-lived programs.
- Because trimming via sbrk can be slow on some systems, and can
- sometimes be wasteful (in cases where programs immediately
- afterward allocate more large chunks), the value should be high
- enough so that your overall system performance would improve by
- releasing this much memory.
-
- The trim threshold and the mmap control parameters (see below)
- can be traded off with one another. Trimming and mmapping are
- two different ways of releasing unused memory back to the
- system. Between these two, it is often possible to keep
- system-level demands of a long-lived program down to a bare
- minimum. For example, in one test suite of sessions measuring
- the XF86 X server on Linux, using a trim threshold of 128K and a
- mmap threshold of 192K led to near-minimal long term resource
- consumption.
-
- If you are using this malloc in a long-lived program, it should
- pay to experiment with these values. As a rough guide, you
- might set it to a value close to the average size of a process
- (program) running on your system. Releasing this much memory
- would allow such a process to run in memory. Generally, it's
- worth it to tune for trimming rather than memory mapping when a
- program undergoes phases where several large chunks are
- allocated and released in ways that can reuse each other's
- storage, perhaps mixed with phases where there are no such
- chunks at all. And in well-behaved long-lived programs,
- controlling release of large blocks via trimming versus mapping
- is usually faster.
-
- However, in most programs, these parameters serve mainly as
- protection against the system-level effects of carrying around
- massive amounts of unneeded memory. Since frequent calls to
- sbrk, mmap, and munmap otherwise degrade performance, the default
- parameters are set to relatively high values that serve only as
- safeguards.
-
- The trim value must be greater than page size to have any useful
- effect. To disable trimming completely, you can set it to
- (unsigned long)(-1).
-
- Trim settings interact with fastbin (MXFAST) settings: Unless
- TRIM_FASTBINS is defined, automatic trimming never takes place upon
- freeing a chunk with size less than or equal to MXFAST. Trimming is
- instead delayed until subsequent freeing of larger chunks. However,
- you can still force an attempted trim by calling malloc_trim.
-
- Also, trimming is not generally possible in cases where
- the main arena is obtained via mmap.
-
- Note that the trick some people use of mallocing a huge space and
- then freeing it at program startup, in an attempt to reserve system
- memory, doesn't have the intended effect under automatic trimming,
- since that memory will immediately be returned to the system.
-*/
-
-#define M_TRIM_THRESHOLD -1
-
-#ifndef DEFAULT_TRIM_THRESHOLD
-#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
-#endif
-
-/*
- M_TOP_PAD is the amount of extra `padding' space to allocate or
- retain whenever sbrk is called. It is used in two ways internally:
-
- * When sbrk is called to extend the top of the arena to satisfy
- a new malloc request, this much padding is added to the sbrk
- request.
-
- * When malloc_trim is called automatically from free(),
- it is used as the `pad' argument.
-
- In both cases, the actual amount of padding is rounded
- so that the end of the arena is always a system page boundary.
-
- The main reason for using padding is to avoid calling sbrk so
- often. Having even a small pad greatly reduces the likelihood
- that nearly every malloc request during program start-up (or
- after trimming) will invoke sbrk, which needlessly wastes
- time.
-
- Automatic rounding-up to page-size units is normally sufficient
- to avoid measurable overhead, so the default is 0. However, in
- systems where sbrk is relatively slow, it can pay to increase
- this value, at the expense of carrying around more memory than
- the program needs.
-*/
-
-#define M_TOP_PAD -2
-
-#ifndef DEFAULT_TOP_PAD
-#define DEFAULT_TOP_PAD (0)
-#endif
-
-/*
- M_MMAP_THRESHOLD is the request size threshold for using mmap()
- to service a request. Requests of at least this size that cannot
- be allocated using already-existing space will be serviced via mmap.
- (If enough normal freed space already exists it is used instead.)
-
- Using mmap segregates relatively large chunks of memory so that
- they can be individually obtained and released from the host
- system. A request serviced through mmap is never reused by any
- other request (at least not directly; the system may just so
- happen to remap successive requests to the same locations).
-
- Segregating space in this way has the benefits that:
-
- 1. Mmapped space can ALWAYS be individually released back
- to the system, which helps keep the system level memory
- demands of a long-lived program low.
- 2. Mapped memory can never become `locked' between
- other chunks, as can happen with normally allocated chunks, which
- means that even trimming via malloc_trim would not release them.
- 3. On some systems with "holes" in address spaces, mmap can obtain
- memory that sbrk cannot.
-
- However, it has the disadvantages that:
-
- 1. The space cannot be reclaimed, consolidated, and then
- used to service later requests, as happens with normal chunks.
- 2. It can lead to more wastage because of mmap page alignment
- requirements
- 3. It causes malloc performance to be more dependent on host
- system memory management support routines which may vary in
- implementation quality and may impose arbitrary
- limitations. Generally, servicing a request via normal
- malloc steps is faster than going through a system's mmap.
-
- The advantages of mmap nearly always outweigh disadvantages for
- "large" chunks, but the value of "large" varies across systems. The
- default is an empirically derived value that works well in most
- systems.
-*/
-
-#define M_MMAP_THRESHOLD -3
-
-#ifndef DEFAULT_MMAP_THRESHOLD
-#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
-#endif
-
-/*
- M_MMAP_MAX is the maximum number of requests to simultaneously
- service using mmap. This parameter exists because some systems have
- a limited number of internal tables for use by mmap, and using more
- than a few of them may degrade performance.
-
- The default is set to a value that serves only as a safeguard.
- Setting to 0 disables use of mmap for servicing large requests. If
- HAVE_MMAP is not set, the default value is 0, and attempts to set it
- to non-zero values in mallopt will fail.
-*/
-
-#define M_MMAP_MAX -4
-
-#ifndef DEFAULT_MMAP_MAX
-#if HAVE_MMAP
-#define DEFAULT_MMAP_MAX (65536)
-#else
-#define DEFAULT_MMAP_MAX (0)
-#endif
-#endif
-
-#ifdef __cplusplus
-}; /* end of extern "C" */
-#endif
-
-/*
- ========================================================================
- To make a fully customizable malloc.h header file, cut everything
- above this line, put into file malloc.h, edit to suit, and #include it
- on the next line, as well as in programs that use this malloc.
- ========================================================================
-*/
-
-/* #include "malloc.h" */
-
-/* --------------------- public wrappers ---------------------- */
-
-#ifdef USE_PUBLIC_MALLOC_WRAPPERS
-
-/* Declare all routines as internal */
-#if __STD_C
-static Void_t* mALLOc(size_t);
-static void fREe(Void_t*);
-static Void_t* rEALLOc(Void_t*, size_t);
-static Void_t* mEMALIGn(size_t, size_t);
-static Void_t* vALLOc(size_t);
-static Void_t* pVALLOc(size_t);
-static Void_t* cALLOc(size_t, size_t);
-static Void_t** iCALLOc(size_t, size_t, Void_t**);
-static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
-static void cFREe(Void_t*);
-static int mTRIm(size_t);
-static size_t mUSABLe(Void_t*);
-static void mSTATs();
-static int mALLOPt(int, int);
-static struct mallinfo mALLINFo(void);
-#else
-static Void_t* mALLOc();
-static void fREe();
-static Void_t* rEALLOc();
-static Void_t* mEMALIGn();
-static Void_t* vALLOc();
-static Void_t* pVALLOc();
-static Void_t* cALLOc();
-static Void_t** iCALLOc();
-static Void_t** iCOMALLOc();
-static void cFREe();
-static int mTRIm();
-static size_t mUSABLe();
-static void mSTATs();
-static int mALLOPt();
-static struct mallinfo mALLINFo();
-#endif
-
-/*
- MALLOC_PREACTION and MALLOC_POSTACTION should be
- defined to return 0 on success, and nonzero on failure.
- The return value of MALLOC_POSTACTION is currently ignored
- in wrapper functions since there is no reasonable default
- action to take on failure.
-*/
-
-
-#ifdef USE_MALLOC_LOCK
-
-#ifdef WIN32
-
-static int mALLOC_MUTEx;
-#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
-#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
-
-#else
-
-#include <pthread.h>
-
-static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
-
-#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)
-#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)
-
-#endif /* WIN32 */
-
-#else
-
-/* Substitute anything you like for these */
-
-#define MALLOC_PREACTION (0)
-#define MALLOC_POSTACTION (0)
-
-#endif
-
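-/*
-  Illustrative sketch (added; not part of the original code): a
-  project with its own locking primitives could substitute them for
-  the no-op definitions above, for example
-
-    extern int my_lock_acquire();   // hypothetical; returns 0 on success
-    extern int my_lock_release();   // hypothetical
-
-    #define MALLOC_PREACTION  my_lock_acquire()
-    #define MALLOC_POSTACTION my_lock_release()
-
-  keeping the contract that MALLOC_PREACTION returns 0 on success and
-  nonzero on failure.
-*/
-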
-Void_t* public_mALLOc(size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = mALLOc(bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-void public_fREe(Void_t* m) {
- if (MALLOC_PREACTION != 0) {
- return;
- }
- fREe(m);
- if (MALLOC_POSTACTION != 0) {
- }
-}
-
-Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = rEALLOc(m, bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = mEMALIGn(alignment, bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_vALLOc(size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = vALLOc(bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_pVALLOc(size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = pVALLOc(bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_cALLOc(size_t n, size_t elem_size) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = cALLOc(n, elem_size);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-
-Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
- Void_t** m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = iCALLOc(n, elem_size, chunks);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
- Void_t** m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = iCOMALLOc(n, sizes, chunks);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-void public_cFREe(Void_t* m) {
- if (MALLOC_PREACTION != 0) {
- return;
- }
- cFREe(m);
- if (MALLOC_POSTACTION != 0) {
- }
-}
-
-int public_mTRIm(size_t s) {
- int result;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- result = mTRIm(s);
- if (MALLOC_POSTACTION != 0) {
- }
- return result;
-}
-
-size_t public_mUSABLe(Void_t* m) {
- size_t result;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- result = mUSABLe(m);
- if (MALLOC_POSTACTION != 0) {
- }
- return result;
-}
-
-void public_mSTATs() {
- if (MALLOC_PREACTION != 0) {
- return;
- }
- mSTATs();
- if (MALLOC_POSTACTION != 0) {
- }
-}
-
-struct mallinfo public_mALLINFo() {
- struct mallinfo m;
- if (MALLOC_PREACTION != 0) {
- struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- return nm;
- }
- m = mALLINFo();
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-int public_mALLOPt(int p, int v) {
- int result;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- result = mALLOPt(p, v);
- if (MALLOC_POSTACTION != 0) {
- }
- return result;
-}
-
-#endif
-
-
-
-/* ------------- Optional versions of memcopy ---------------- */
-
-
-#if USE_MEMCPY
-
-/*
- Note: memcpy is ONLY invoked with non-overlapping regions,
- so the (usually slower) memmove is not needed.
-*/
-
-#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
-#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
-
-#else /* !USE_MEMCPY */
-
-/* Use Duff's device for good zeroing/copying performance. */
-
-#define MALLOC_ZERO(charp, nbytes) \
-do { \
- INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
- CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
- long mcn; \
- if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
- switch (mctmp) { \
- case 0: for(;;) { *mzp++ = 0; \
- case 7: *mzp++ = 0; \
- case 6: *mzp++ = 0; \
- case 5: *mzp++ = 0; \
- case 4: *mzp++ = 0; \
- case 3: *mzp++ = 0; \
- case 2: *mzp++ = 0; \
- case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
- } \
-} while(0)
-
-#define MALLOC_COPY(dest,src,nbytes) \
-do { \
- INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
- INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
- CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
- long mcn; \
- if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
- switch (mctmp) { \
- case 0: for(;;) { *mcdst++ = *mcsrc++; \
- case 7: *mcdst++ = *mcsrc++; \
- case 6: *mcdst++ = *mcsrc++; \
- case 5: *mcdst++ = *mcsrc++; \
- case 4: *mcdst++ = *mcsrc++; \
- case 3: *mcdst++ = *mcsrc++; \
- case 2: *mcdst++ = *mcsrc++; \
- case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
- } \
-} while(0)
-
-#endif
-
-/* ------------------ MMAP support ------------------ */
-
-
-#if HAVE_MMAP
-
-#ifndef LACKS_FCNTL_H
-#include <fcntl.h>
-#endif
-
-#ifndef LACKS_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-/*
- Nearly all versions of mmap support MAP_ANONYMOUS,
- so the following is unlikely to be needed, but is
- supplied just in case.
-*/
-
-#ifndef MAP_ANONYMOUS
-
-static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
-
-#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
- (dev_zero_fd = open("/dev/zero", O_RDWR), \
- mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
- mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
-
-#else
-
-#define MMAP(addr, size, prot, flags) \
- (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
-
-#endif
-
-
-#endif /* HAVE_MMAP */
-
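-/*
-  Illustrative sketch (added): a caller requesting an anonymous,
-  private, read-write region through the macro above would write
-  something like
-
-    char* mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
-    if (mm != (char*)(MORECORE_FAILURE))
-      ;  // use the new region; (char*)-1 is the same value as MAP_FAILED
-*/
-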
-
-/*
- ----------------------- Chunk representations -----------------------
-*/
-
-
-/*
- This struct declaration is misleading (but accurate and necessary).
- It declares a "view" into memory allowing access to necessary
- fields at known offsets from a given base. See explanation below.
-*/
-
-struct malloc_chunk {
-
- INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
- INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
-
- struct malloc_chunk* fd; /* double links -- used only if free. */
- struct malloc_chunk* bk;
-};
-
-
-typedef struct malloc_chunk* mchunkptr;
-
-/*
- malloc_chunk details:
-
- (The following includes lightly edited explanations by Colin Plumb.)
-
- Chunks of memory are maintained using a `boundary tag' method as
- described in e.g., Knuth or Standish. (See the paper by Paul
- Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
- survey of such techniques.) Sizes of free chunks are stored both
- in the front of each chunk and at the end. This makes
- consolidating fragmented chunks into bigger chunks very fast. The
- size fields also hold bits representing whether chunks are free or
- in use.
-
- An allocated chunk looks like this:
-
-
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if allocated | |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |P|
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | User data starts here... .
- . .
- . (malloc_usable_space() bytes) .
- . |
-nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
- Where "chunk" is the front of the chunk for the purpose of most of
- the malloc code, but "mem" is the pointer that is returned to the
- user. "Nextchunk" is the beginning of the next contiguous chunk.
-
- Chunks always begin on even word boundaries, so the mem portion
- (which is returned to the user) is also on an even word boundary, and
- thus at least double-word aligned.
-
- Free chunks are stored in circular doubly-linked lists, and look like this:
-
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `head:' | Size of chunk, in bytes |P|
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Forward pointer to next chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Back pointer to previous chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Unused space (may be 0 bytes long) .
- . .
- . |
-nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `foot:' | Size of chunk, in bytes |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
- The P (PREV_INUSE) bit, stored in the unused low-order bit of the
- chunk size (which is always a multiple of two words), is an in-use
- bit for the *previous* chunk. If that bit is *clear*, then the
- word before the current chunk size contains the previous chunk
- size, and can be used to find the front of the previous chunk.
- The very first chunk allocated always has this bit set,
- preventing access to non-existent (or non-owned) memory. If
- prev_inuse is set for any given chunk, then you CANNOT determine
- the size of the previous chunk, and might even get a memory
- addressing fault when trying to do so.
-
- Note that the `foot' of the current chunk is actually represented
- as the prev_size of the NEXT chunk. This makes it easier to
- deal with alignments etc but can be very confusing when trying
- to extend or adapt this code.
-
- The two exceptions to all this are
-
- 1. The special chunk `top' doesn't bother using the
- trailing size field since there is no next contiguous chunk
- that would have to index off it. After initialization, `top'
- is forced to always exist. If it would become less than
- MINSIZE bytes long, it is replenished.
-
- 2. Chunks allocated via mmap, which have the second-lowest-order
- bit (IS_MMAPPED) set in their size fields. Because they are
- allocated one-by-one, each must contain its own trailing size field.
-
-*/
-
-/*
- ---------- Size and alignment checks and conversions ----------
-*/
-
-/* conversion from malloc headers to user pointers, and back */
-
-#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
-#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
-
-/* The smallest possible chunk */
-#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
-
-/* The smallest size we can malloc is an aligned minimal chunk */
-
-#define MINSIZE \
- (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
-
-/* Check if m has acceptable alignment */
-
-#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
-
-
-/*
- Check if a request is so large that it would wrap around zero when
- padded and aligned. To simplify some other code, the bound is made
- low enough so that adding MINSIZE will also not wrap around zero.
-*/
-
-#define REQUEST_OUT_OF_RANGE(req) \
- ((CHUNK_SIZE_T)(req) >= \
- (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE))
-
-/* pad request bytes into a usable size -- internal version */
-
-#define request2size(req) \
- (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
- MINSIZE : \
- ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
-
-/* Same, except also perform argument check */
-
-#define checked_request2size(req, sz) \
- if (REQUEST_OUT_OF_RANGE(req)) { \
- MALLOC_FAILURE_ACTION; \
- return 0; \
- } \
- (sz) = request2size(req);
-
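-/*
-  Worked example (added for illustration; assumes 4-byte SIZE_SZ and
-  8-byte alignment, i.e. MALLOC_ALIGN_MASK == 7 and MINSIZE == 16):
-
-    request2size(1)  == 16  // 1+4+7 is below MINSIZE, so MINSIZE is used
-    request2size(20) == 24  // (20+4+7) & ~7
-    request2size(24) == 32  // (24+4+7) & ~7
-
-  i.e. each request is padded by one size field and rounded up to a
-  multiple of the alignment, never below MINSIZE.
-*/
-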
-/*
- --------------- Physical chunk operations ---------------
-*/
-
-
-/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
-#define PREV_INUSE 0x1
-
-/* extract inuse bit of previous chunk */
-#define prev_inuse(p) ((p)->size & PREV_INUSE)
-
-
-/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
-#define IS_MMAPPED 0x2
-
-/* check for mmap()'ed chunk */
-#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
-
-/*
- Bits to mask off when extracting size
-
- Note: IS_MMAPPED is intentionally not masked off from size field in
- macros for which mmapped chunks should never be seen. This should
- cause helpful core dumps to occur if it is tried by accident by
- people extending or adapting this malloc.
-*/
-#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
-
-/* Get size, ignoring use bits */
-#define chunksize(p) ((p)->size & ~(SIZE_BITS))
-
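-/*
-  Worked example (added for illustration): a size field of 0x19
-  describes a 24-byte (0x18) chunk whose previous neighbour is in use
-  and which was not obtained via mmap:
-
-    chunksize(p)        == 0x18        // flag bits masked off
-    prev_inuse(p)       == PREV_INUSE
-    chunk_is_mmapped(p) == 0
-*/
-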
-
-/* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
-
-/* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
-
-/* Treat space at ptr + offset as a chunk */
-#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
-
-/* extract p's inuse bit */
-#define inuse(p)\
-((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
-
-/* set/clear chunk as being inuse without otherwise disturbing */
-#define set_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
-
-#define clear_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
-
-
-/* check/set/clear inuse bits in known places */
-#define inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
-
-#define set_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
-
-#define clear_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
-
-
-/* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
-
-/* Set size/use field */
-#define set_head(p, s) ((p)->size = (s))
-
-/* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
-
-
-/*
- -------------------- Internal data structures --------------------
-
- All internal state is held in an instance of malloc_state defined
- below. There are no other static variables, except in two optional
- cases:
- * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
- * If HAVE_MMAP is true, but mmap doesn't support
- MAP_ANONYMOUS, a dummy file descriptor for mmap.
-
- Beware of lots of tricks that minimize the total bookkeeping space
- requirements. The result is a little over 1K bytes (for 4byte
- pointers and size_t.)
-*/
-
-/*
- Bins
-
- An array of bin headers for free chunks. Each bin is doubly
- linked. The bins are approximately proportionally (log) spaced.
- There are a lot of these bins (96). This may look excessive, but
- works very well in practice. Most bins hold sizes that are
- unusual as malloc request sizes, but are more usual for fragments
- and consolidated sets of chunks, which is what these bins hold, so
- they can be found quickly. All procedures maintain the invariant
- that no consolidated chunk physically borders another one, so each
- chunk in a list is known to be preceded and followed by either
- inuse chunks or the ends of memory.
-
- Chunks in bins are kept in size order, with ties going to the
- approximately least recently used chunk. Ordering isn't needed
- for the small bins, which all contain the same-sized chunks, but
- facilitates best-fit allocation for larger chunks. These lists
- are just sequential. Keeping them in order almost never requires
- enough traversal to warrant using fancier ordered data
- structures.
-
- Chunks of the same size are linked with the most
- recently freed at the front, and allocations are taken from the
- back. This results in LRU (FIFO) allocation order, which tends
- to give each chunk an equal opportunity to be consolidated with
- adjacent freed chunks, resulting in larger free chunks and less
- fragmentation.
-
- To simplify use in double-linked lists, each bin header acts
- as a malloc_chunk. This avoids special-casing for headers.
- But to conserve space and improve locality, we allocate
- only the fd/bk pointers of bins, and then use repositioning tricks
- to treat these as the fields of a malloc_chunk*.
-*/
-
-typedef struct malloc_chunk* mbinptr;
-
-/* addressing -- note that bin_at(0) does not exist */
-#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
-
-/* analog of ++bin */
-#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
-
-/* Reminders about list directionality within bins */
-#define first(b) ((b)->fd)
-#define last(b) ((b)->bk)
-
-/* Take a chunk off a bin list */
-#define unlink(P, BK, FD) { \
- FD = P->fd; \
- BK = P->bk; \
- FD->bk = BK; \
- BK->fd = FD; \
-}
-
-/*
-  Indexing
-
-  Bins for sizes < 256 bytes contain chunks of all the same size, spaced
-  8 bytes apart. Larger bins are approximately logarithmically spaced,
-  four bins per power-of-two size range:
-
-  32 bins of size 8
-  4 bins of size 64
-  4 bins of size 128
-  4 bins of size 256
-  ... (widths doubling for each group of four) ...
-  1 bin of size what's left
-
-  (These figures follow from NSMALLBINS, MIN_LARGE_SIZE and
-  largebin_index below.) Truly large requests are expected to be
-  serviced via mmap rather than from the widest bins.
-*/
-
-#define NBINS 96
-#define NSMALLBINS 32
-#define SMALLBIN_WIDTH 8
-#define MIN_LARGE_SIZE 256
-
-#define in_smallbin_range(sz) \
- ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
-
-#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
-
-/*
- Compute index for size. We expect this to be inlined when
- compiled with optimization, else not, which works out well.
-*/
-static int largebin_index(unsigned int sz) {
- unsigned int x = sz >> SMALLBIN_WIDTH;
- unsigned int m; /* bit position of highest set bit of x */
-
- if (x >= 0x10000) return NBINS-1;
-
- /* On intel, use BSRL instruction to find highest bit */
-#if defined(__GNUC__) && defined(i386)
-
- __asm__("bsrl %1,%0\n\t"
- : "=r" (m)
- : "g" (x));
-
-#else
- {
- /*
- Based on branch-free nlz algorithm in chapter 5 of Henry
- S. Warren Jr's book "Hacker's Delight".
- */
-
- unsigned int n = ((x - 0x100) >> 16) & 8;
- x <<= n;
- m = ((x - 0x1000) >> 16) & 4;
- n += m;
- x <<= m;
- m = ((x - 0x4000) >> 16) & 2;
- n += m;
- x = (x << m) >> 14;
- m = 13 - n + (x & ~(x>>1));
- }
-#endif
-
- /* Use next 2 bits to create finer-granularity bins */
- return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
-}
-
-#define bin_index(sz) \
- ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
-
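-/*
-  Worked example (added for illustration, using the constants above):
-
-    bin_index(16)  == smallbin_index(16)  == 2   // 16 >> 3
-    bin_index(248) == smallbin_index(248) == 31  // largest small bin
-    bin_index(256) == largebin_index(256) == 32  // first large bin
-
-  so every chunk size below MIN_LARGE_SIZE maps to an exact-size small
-  bin, and larger sizes fall into the logarithmically spaced large bins.
-*/
-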
-/*
- FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
- first bin that is maintained in sorted order. This must
- be the smallest size corresponding to a given bin.
-
- Normally, this should be MIN_LARGE_SIZE. But you can weaken
- best fit guarantees to sometimes speed up malloc by increasing this value.
- Doing this means that malloc may choose a chunk that is
- non-best-fitting by up to the width of the bin.
-
- Some useful cutoff values:
- 512 - all bins sorted
- 2560 - leaves bins <= 64 bytes wide unsorted
- 12288 - leaves bins <= 512 bytes wide unsorted
- 65536 - leaves bins <= 4096 bytes wide unsorted
- 262144 - leaves bins <= 32768 bytes wide unsorted
- -1 - no bins sorted (not recommended!)
-*/
-
-#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
-/* #define FIRST_SORTED_BIN_SIZE 65536 */
-
-/*
- Unsorted chunks
-
- All remainders from chunk splits, as well as all returned chunks,
- are first placed in the "unsorted" bin. They are then placed
- in regular bins after malloc gives them ONE chance to be used before
- binning. So, basically, the unsorted_chunks list acts as a queue,
- with chunks being placed on it in free (and malloc_consolidate),
- and taken off (to be either used or placed in bins) in malloc.
-*/
-
-/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
-#define unsorted_chunks(M) (bin_at(M, 1))
-
-/*
- Top
-
- The top-most available chunk (i.e., the one bordering the end of
- available memory) is treated specially. It is never included in
- any bin, is used only if no other chunk is available, and is
- released back to the system if it is very large (see
- M_TRIM_THRESHOLD). Because top initially
- points to its own bin with initial zero size, thus forcing
- extension on the first malloc request, we avoid having any special
- code in malloc to check whether it even exists yet. But we still
- need to do so when getting memory from system, so we make
- initial_top treat the bin as a legal but unusable chunk during the
- interval between initialization and the first call to
- sYSMALLOc. (This is somewhat delicate, since it relies on
- the 2 preceding words to be zero during this interval as well.)
-*/
-
-/* Conveniently, the unsorted bin can be used as dummy top on first call */
-#define initial_top(M) (unsorted_chunks(M))
-
-/*
- Binmap
-
- To help compensate for the large number of bins, a one-level index
- structure is used for bin-by-bin searching. `binmap' is a
- bitvector recording whether bins are definitely empty so they can
- be skipped over during traversals. The bits are NOT always
- cleared as soon as bins are empty, but instead only
- when they are noticed to be empty during traversal in malloc.
-*/
-
-/* Conservatively use 32 bits per map word, even if on 64bit system */
-#define BINMAPSHIFT 5
-#define BITSPERMAP (1U << BINMAPSHIFT)
-#define BINMAPSIZE (NBINS / BITSPERMAP)
-
-#define idx2block(i) ((i) >> BINMAPSHIFT)
-#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
-
-#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
-#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
-#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
-
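-/*
-  Worked example (added for illustration): with 32-bit map words,
-  bin 37 lives in map word 1, bit 5:
-
-    idx2block(37) == 1         // 37 >> 5
-    idx2bit(37)   == (1U << 5) // 37 & 31 == 5
-
-  so mark_bin(m, 37) sets bit 5 of binmap[1] and get_binmap(m, 37)
-  tests it.
-*/
-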
-/*
- Fastbins
-
- An array of lists holding recently freed small chunks. Fastbins
- are not doubly linked. It is faster to single-link them, and
- since chunks are never removed from the middles of these lists,
- double linking is not necessary. Also, unlike regular bins, they
- are not even processed in FIFO order (they use faster LIFO) since
- ordering doesn't much matter in the transient contexts in which
- fastbins are normally used.
-
- Chunks in fastbins keep their inuse bit set, so they cannot
- be consolidated with other free chunks. malloc_consolidate
- releases all chunks in fastbins and consolidates them with
- other free chunks.
-*/
-
-typedef struct malloc_chunk* mfastbinptr;
-
-/* offset 2 to use otherwise unindexable first 2 bins */
-#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
-
-/* The maximum fastbin request size we support */
-#define MAX_FAST_SIZE 80
-
-#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
-
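-/*
-  Worked example (added for illustration; assumes 4-byte SIZE_SZ and
-  8-byte alignment as above):
-
-    fastbin_index(16)               == 0   // (16 >> 3) - 2, the smallest chunk
-    fastbin_index(request2size(80)) == fastbin_index(88) == 9
-    NFASTBINS                       == 10
-
-  so requests of up to MAX_FAST_SIZE (80) bytes can be served from one
-  of ten singly linked fastbins when fastbins are enabled.
-*/
-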
-/*
- FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
- that triggers automatic consolidation of possibly-surrounding
- fastbin chunks. This is a heuristic, so the exact value should not
- matter too much. It is defined at half the default trim threshold as a
- compromise heuristic to only attempt consolidation if it is likely
- to lead to trimming. However, it is not dynamically tunable, since
- consolidation reduces fragmentation surrounding large chunks even
- if trimming is not used.
-*/
-
-#define FASTBIN_CONSOLIDATION_THRESHOLD \
- ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
-
-/*
- Since the lowest 2 bits in max_fast don't matter in size comparisons,
- they are used as flags.
-*/
-
-/*
- ANYCHUNKS_BIT held in max_fast indicates that there may be any
- freed chunks at all. It is set true when entering a chunk into any
- bin.
-*/
-
-#define ANYCHUNKS_BIT (1U)
-
-#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
-#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
-#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
-
-/*
- FASTCHUNKS_BIT held in max_fast indicates that there are probably
- some fastbin chunks. It is set true on entering a chunk into any
- fastbin, and cleared only in malloc_consolidate.
-*/
-
-#define FASTCHUNKS_BIT (2U)
-
-#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
-#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
-#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
-
-/*
- Set value of max_fast.
- Use impossibly small value if 0.
-*/
-
-#define set_max_fast(M, s) \
- (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
- ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
-
-#define get_max_fast(M) \
- ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
-
-
-/*
- morecore_properties is a status word holding dynamically discovered
- or controlled properties of the morecore function
-*/
-
-#define MORECORE_CONTIGUOUS_BIT (1U)
-
-#define contiguous(M) \
- (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
-#define noncontiguous(M) \
- (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
-#define set_contiguous(M) \
- ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
-#define set_noncontiguous(M) \
- ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
-
-
-/*
- ----------- Internal state representation and initialization -----------
-*/
-
-struct malloc_state {
-
- /* The maximum chunk size to be eligible for fastbin */
- INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
-
- /* Fastbins */
- mfastbinptr fastbins[NFASTBINS];
-
- /* Base of the topmost chunk -- not otherwise kept in a bin */
- mchunkptr top;
-
- /* The remainder from the most recent split of a small request */
- mchunkptr last_remainder;
-
- /* Normal bins packed as described above */
- mchunkptr bins[NBINS * 2];
-
- /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
- unsigned int binmap[BINMAPSIZE+1];
-
- /* Tunable parameters */
- CHUNK_SIZE_T trim_threshold;
- INTERNAL_SIZE_T top_pad;
- INTERNAL_SIZE_T mmap_threshold;
-
- /* Memory map support */
- int n_mmaps;
- int n_mmaps_max;
- int max_n_mmaps;
-
- /* Cache malloc_getpagesize */
- unsigned int pagesize;
-
- /* Track properties of MORECORE */
- unsigned int morecore_properties;
-
- /* Statistics */
- INTERNAL_SIZE_T mmapped_mem;
- INTERNAL_SIZE_T sbrked_mem;
- INTERNAL_SIZE_T max_sbrked_mem;
- INTERNAL_SIZE_T max_mmapped_mem;
- INTERNAL_SIZE_T max_total_mem;
-};
-
-typedef struct malloc_state *mstate;
-
-/*
- There is exactly one instance of this struct in this malloc.
- If you are adapting this malloc in a way that does NOT use a static
- malloc_state, you MUST explicitly zero-fill it before using. This
- malloc relies on the property that malloc_state is initialized to
- all zeroes (as is true of C statics).
-*/
-
-static struct malloc_state av_; /* never directly referenced */
-
-/*
- All uses of av_ are via get_malloc_state().
- At most one "call" to get_malloc_state is made per invocation of
- the public versions of malloc and free, but other routines
- that in turn invoke malloc and/or free may call it more than once.
- Also, it is called in check* routines if DEBUG is set.
-*/
-
-#define get_malloc_state() (&(av_))
-
-/*
- Initialize a malloc_state struct.
-
- This is called only from within malloc_consolidate, which needs
- be called in the same contexts anyway. It is never called directly
- outside of malloc_consolidate because some optimizing compilers try
- to inline it at all call points, which turns out not to be an
- optimization at all. (Inlining it in malloc_consolidate is fine though.)
-*/
-
-#if __STD_C
-static void malloc_init_state(mstate av)
-#else
-static void malloc_init_state(av) mstate av;
-#endif
-{
- int i;
- mbinptr bin;
-
- /* Establish circular links for normal bins */
- for (i = 1; i < NBINS; ++i) {
- bin = bin_at(av,i);
- bin->fd = bin->bk = bin;
- }
-
- av->top_pad = DEFAULT_TOP_PAD;
- av->n_mmaps_max = DEFAULT_MMAP_MAX;
- av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
- av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
-
-#if MORECORE_CONTIGUOUS
- set_contiguous(av);
-#else
- set_noncontiguous(av);
-#endif
-
-
- set_max_fast(av, DEFAULT_MXFAST);
-
- av->top = initial_top(av);
- av->pagesize = malloc_getpagesize;
-}
-
-/*
- Other internal utilities operating on mstates
-*/
-
-#if __STD_C
-static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
-static int sYSTRIm(size_t, mstate);
-static void malloc_consolidate(mstate);
-static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
-#else
-static Void_t* sYSMALLOc();
-static int sYSTRIm();
-static void malloc_consolidate();
-static Void_t** iALLOc();
-#endif
-
-/*
- Debugging support
-
- These routines make a number of assertions about the states
- of data structures that should be true at all times. If any
- are not true, it's very likely that a user program has somehow
- trashed memory. (It's also possible that there is a coding error
- in malloc. In which case, please report it!)
-*/
-
-#if ! DEBUG
-
-#define check_chunk(P)
-#define check_free_chunk(P)
-#define check_inuse_chunk(P)
-#define check_remalloced_chunk(P,N)
-#define check_malloced_chunk(P,N)
-#define check_malloc_state()
-
-#else
-#define check_chunk(P) do_check_chunk(P)
-#define check_free_chunk(P) do_check_free_chunk(P)
-#define check_inuse_chunk(P) do_check_inuse_chunk(P)
-#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
-#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
-#define check_malloc_state() do_check_malloc_state()
-
-/*
- Properties of all chunks
-*/
-
-#if __STD_C
-static void do_check_chunk(mchunkptr p)
-#else
-static void do_check_chunk(p) mchunkptr p;
-#endif
-{
- mstate av = get_malloc_state();
- CHUNK_SIZE_T sz = chunksize(p);
- /* min and max possible addresses assuming contiguous allocation */
- char* max_address = (char*)(av->top) + chunksize(av->top);
- char* min_address = max_address - av->sbrked_mem;
-
- if (!chunk_is_mmapped(p)) {
-
- /* Has legal address ... */
- if (p != av->top) {
- if (contiguous(av)) {
- assert(((char*)p) >= min_address);
- assert(((char*)p + sz) <= ((char*)(av->top)));
- }
- }
- else {
- /* top size is always at least MINSIZE */
- assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
- /* top predecessor always marked inuse */
- assert(prev_inuse(p));
- }
-
- }
- else {
-#if HAVE_MMAP
- /* address is outside main heap */
- if (contiguous(av) && av->top != initial_top(av)) {
- assert(((char*)p) < min_address || ((char*)p) > max_address);
- }
- /* chunk is page-aligned */
- assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
- /* mem is aligned */
- assert(aligned_OK(chunk2mem(p)));
-#else
- /* force an appropriate assert violation if debug set */
- assert(!chunk_is_mmapped(p));
-#endif
- }
-}
-
-/*
- Properties of free chunks
-*/
-
-#if __STD_C
-static void do_check_free_chunk(mchunkptr p)
-#else
-static void do_check_free_chunk(p) mchunkptr p;
-#endif
-{
- mstate av = get_malloc_state();
-
- INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
- mchunkptr next = chunk_at_offset(p, sz);
-
- do_check_chunk(p);
-
- /* Chunk must claim to be free ... */
- assert(!inuse(p));
- assert (!chunk_is_mmapped(p));
-
- /* Unless a special marker, must have OK fields */
- if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
- {
- assert((sz & MALLOC_ALIGN_MASK) == 0);
- assert(aligned_OK(chunk2mem(p)));
- /* ... matching footer field */
- assert(next->prev_size == sz);
- /* ... and is fully consolidated */
- assert(prev_inuse(p));
- assert (next == av->top || inuse(next));
-
- /* ... and has minimally sane links */
- assert(p->fd->bk == p);
- assert(p->bk->fd == p);
- }
- else /* markers are always of size SIZE_SZ */
- assert(sz == SIZE_SZ);
-}
-
-/*
- Properties of inuse chunks
-*/
-
-#if __STD_C
-static void do_check_inuse_chunk(mchunkptr p)
-#else
-static void do_check_inuse_chunk(p) mchunkptr p;
-#endif
-{
- mstate av = get_malloc_state();
- mchunkptr next;
- do_check_chunk(p);
-
- if (chunk_is_mmapped(p))
- return; /* mmapped chunks have no next/prev */
-
- /* Check whether it claims to be in use ... */
- assert(inuse(p));
-
- next = next_chunk(p);
-
- /* ... and is surrounded by OK chunks.
- Since more things can be checked with free chunks than inuse ones,
- if an inuse chunk borders them and debug is on, it's worth doing them.
- */
- if (!prev_inuse(p)) {
- /* Note that we cannot even look at prev unless it is not inuse */
- mchunkptr prv = prev_chunk(p);
- assert(next_chunk(prv) == p);
- do_check_free_chunk(prv);
- }
-
- if (next == av->top) {
- assert(prev_inuse(next));
- assert(chunksize(next) >= MINSIZE);
- }
- else if (!inuse(next))
- do_check_free_chunk(next);
-}
-
-/*
- Properties of chunks recycled from fastbins
-*/
-
-#if __STD_C
-static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
-#else
-static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
-#endif
-{
- INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
-
- do_check_inuse_chunk(p);
-
- /* Legal size ... */
- assert((sz & MALLOC_ALIGN_MASK) == 0);
- assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
- /* ... and alignment */
- assert(aligned_OK(chunk2mem(p)));
- /* chunk is less than MINSIZE more than request */
- assert((long)(sz) - (long)(s) >= 0);
- assert((long)(sz) - (long)(s + MINSIZE) < 0);
-}
-
-/*
- Properties of nonrecycled chunks at the point they are malloced
-*/
-
-#if __STD_C
-static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
-#else
-static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
-#endif
-{
- /* same as recycled case ... */
- do_check_remalloced_chunk(p, s);
-
- /*
- ... plus, must obey implementation invariant that prev_inuse is
- always true of any allocated chunk; i.e., that each allocated
- chunk borders either a previously allocated and still in-use
- chunk, or the base of its memory arena. This is ensured
- by making all allocations from the `lowest' part of any found
- chunk. This does not necessarily hold however for chunks
- recycled via fastbins.
- */
-
- assert(prev_inuse(p));
-}
-
-
-/*
- Properties of malloc_state.
-
- This may be useful for debugging malloc, as well as detecting user
- programmer errors that somehow write into malloc_state.
-
- If you are extending or experimenting with this malloc, you can
- probably figure out how to hack this routine to print out or
- display chunk addresses, sizes, bins, and other instrumentation.
-*/
-
-static void do_check_malloc_state()
-{
- mstate av = get_malloc_state();
- int i;
- mchunkptr p;
- mchunkptr q;
- mbinptr b;
- unsigned int binbit;
- int empty;
- unsigned int idx;
- INTERNAL_SIZE_T size;
- CHUNK_SIZE_T total = 0;
- int max_fast_bin;
-
- /* internal size_t must be no wider than pointer type */
- assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
-
- /* alignment is a power of 2 */
- assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
-
- /* cannot run remaining checks until fully initialized */
- if (av->top == 0 || av->top == initial_top(av))
- return;
-
- /* pagesize is a power of 2 */
- assert((av->pagesize & (av->pagesize-1)) == 0);
-
- /* properties of fastbins */
-
- /* max_fast is in allowed range */
- assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
-
- max_fast_bin = fastbin_index(av->max_fast);
-
- for (i = 0; i < NFASTBINS; ++i) {
- p = av->fastbins[i];
-
- /* all bins past max_fast are empty */
- if (i > max_fast_bin)
- assert(p == 0);
-
- while (p != 0) {
- /* each chunk claims to be inuse */
- do_check_inuse_chunk(p);
- total += chunksize(p);
- /* chunk belongs in this bin */
- assert(fastbin_index(chunksize(p)) == i);
- p = p->fd;
- }
- }
-
- if (total != 0)
- assert(have_fastchunks(av));
- else if (!have_fastchunks(av))
- assert(total == 0);
-
- /* check normal bins */
- for (i = 1; i < NBINS; ++i) {
- b = bin_at(av,i);
-
- /* binmap is accurate (except for bin 1 == unsorted_chunks) */
- if (i >= 2) {
- binbit = get_binmap(av,i);
- empty = last(b) == b;
- if (!binbit)
- assert(empty);
- else if (!empty)
- assert(binbit);
- }
-
- for (p = last(b); p != b; p = p->bk) {
- /* each chunk claims to be free */
- do_check_free_chunk(p);
- size = chunksize(p);
- total += size;
- if (i >= 2) {
- /* chunk belongs in bin */
- idx = bin_index(size);
- assert(idx == i);
- /* lists are sorted */
- if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
- assert(p->bk == b ||
- (CHUNK_SIZE_T)chunksize(p->bk) >=
- (CHUNK_SIZE_T)chunksize(p));
- }
- }
- /* chunk is followed by a legal chain of inuse chunks */
- for (q = next_chunk(p);
- (q != av->top && inuse(q) &&
- (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
- q = next_chunk(q))
- do_check_inuse_chunk(q);
- }
- }
-
- /* top chunk is OK */
- check_chunk(av->top);
-
- /* sanity checks for statistics */
-
- assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
- assert(av->n_mmaps >= 0);
- assert(av->n_mmaps <= av->max_n_mmaps);
-
- assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
- (CHUNK_SIZE_T)(av->max_sbrked_mem));
-
- assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
- (CHUNK_SIZE_T)(av->max_mmapped_mem));
-
- assert((CHUNK_SIZE_T)(av->max_total_mem) >=
- (CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem));
-}
-#endif
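/*
  The check_* macros above compile to nothing unless DEBUG is set to a
  nonzero value when this file is built, e.g.:

      cc -DDEBUG=1 -c malloc.c

  With DEBUG enabled, every public entry point runs the do_check_*
  assertions above; this is slow but catches most heap corruption close
  to the point where it happens.
*/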
-
-
-/* ----------- Routines dealing with system allocation -------------- */
-
-/*
- sysmalloc handles malloc cases requiring more memory from the system.
- On entry, it is assumed that av->top does not have enough
- space to service request for nb bytes, thus requiring that av->top
- be extended or replaced.
-*/
-
-#if __STD_C
-static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
-#else
-static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
-#endif
-{
- mchunkptr old_top; /* incoming value of av->top */
- INTERNAL_SIZE_T old_size; /* its size */
- char* old_end; /* its end address */
-
- long size; /* arg to first MORECORE or mmap call */
- char* brk; /* return value from MORECORE */
-
- long correction; /* arg to 2nd MORECORE call */
- char* snd_brk; /* 2nd return val */
-
- INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
- INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
- char* aligned_brk; /* aligned offset into brk */
-
- mchunkptr p; /* the allocated/returned chunk */
- mchunkptr remainder; /* remainder from allocation */
- CHUNK_SIZE_T remainder_size; /* its size */
-
- CHUNK_SIZE_T sum; /* for updating stats */
-
- size_t pagemask = av->pagesize - 1;
-
- /*
- If there is space available in fastbins, consolidate and retry
- malloc from scratch rather than getting memory from system. This
- can occur only if nb is in smallbin range so we didn't consolidate
- upon entry to malloc. It is much easier to handle this case here
- than in malloc proper.
- */
-
- if (have_fastchunks(av)) {
- assert(in_smallbin_range(nb));
- malloc_consolidate(av);
- return mALLOc(nb - MALLOC_ALIGN_MASK);
- }
-
-
-#if HAVE_MMAP
-
- /*
- If have mmap, and the request size meets the mmap threshold, and
- the system supports mmap, and there are few enough currently
- allocated mmapped regions, try to directly map this request
- rather than expanding top.
- */
-
- if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
- (av->n_mmaps < av->n_mmaps_max)) {
-
- char* mm; /* return value from mmap call*/
-
- /*
- Round up size to nearest page. For mmapped chunks, the overhead
- is one SIZE_SZ unit larger than for normal chunks, because there
- is no following chunk whose prev_size field could be used.
- */
- size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
-
- /* Don't try if size wraps around 0 */
- if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
-
- mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
-
- if (mm != (char*)(MORECORE_FAILURE)) {
-
- /*
- The offset to the start of the mmapped region is stored
- in the prev_size field of the chunk. This allows us to adjust
- returned start address to meet alignment requirements here
- and in memalign(), and still be able to compute proper
- address argument for later munmap in free() and realloc().
- */
-
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0) {
- correction = MALLOC_ALIGNMENT - front_misalign;
- p = (mchunkptr)(mm + correction);
- p->prev_size = correction;
- set_head(p, (size - correction) |IS_MMAPPED);
- }
- else {
- p = (mchunkptr)mm;
- p->prev_size = 0;
- set_head(p, size|IS_MMAPPED);
- }
-
- /* update statistics */
-
- if (++av->n_mmaps > av->max_n_mmaps)
- av->max_n_mmaps = av->n_mmaps;
-
- sum = av->mmapped_mem += size;
- if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
- av->max_mmapped_mem = sum;
- sum += av->sbrked_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
- av->max_total_mem = sum;
-
- check_chunk(p);
-
- return chunk2mem(p);
- }
- }
- }
-#endif
-
- /* Record incoming configuration of top */
-
- old_top = av->top;
- old_size = chunksize(old_top);
- old_end = (char*)(chunk_at_offset(old_top, old_size));
-
- brk = snd_brk = (char*)(MORECORE_FAILURE);
-
- /*
- If not the first time through, we require old_size to be
- at least MINSIZE and to have prev_inuse set.
- */
-
- assert((old_top == initial_top(av) && old_size == 0) ||
- ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
- prev_inuse(old_top)));
-
- /* Precondition: not enough current space to satisfy nb request */
- assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
-
- /* Precondition: all fastbins are consolidated */
- assert(!have_fastchunks(av));
-
-
- /* Request enough space for nb + pad + overhead */
-
- size = nb + av->top_pad + MINSIZE;
-
- /*
- If contiguous, we can subtract out existing space that we hope to
- combine with new space. We add it back later only if
- we don't actually get contiguous space.
- */
-
- if (contiguous(av))
- size -= old_size;
-
- /*
- Round to a multiple of page size.
- If MORECORE is not contiguous, this ensures that we only call it
- with whole-page arguments. And if MORECORE is contiguous and
- this is not first time through, this preserves page-alignment of
- previous calls. Otherwise, we correct to page-align below.
- */
-
- size = (size + pagemask) & ~pagemask;
-
- /*
- Don't try to call MORECORE if argument is so big as to appear
- negative. Note that since mmap takes size_t arg, it may succeed
- below even if we cannot call MORECORE.
- */
-
- if (size > 0)
- brk = (char*)(MORECORE(size));
-
- /*
- If have mmap, try using it as a backup when MORECORE fails or
- cannot be used. This is worth doing on systems that have "holes" in
- address space, so sbrk cannot extend to give contiguous space, but
- space is available elsewhere. Note that we ignore mmap max count
- and threshold limits, since the space will not be used as a
- segregated mmap region.
- */
-
-#if HAVE_MMAP
- if (brk == (char*)(MORECORE_FAILURE)) {
-
- /* Cannot merge with old top, so add its size back in */
- if (contiguous(av))
- size = (size + old_size + pagemask) & ~pagemask;
-
- /* If we are relying on mmap as backup, then use larger units */
- if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
- size = MMAP_AS_MORECORE_SIZE;
-
- /* Don't try if size wraps around 0 */
- if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
-
- brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
-
- if (brk != (char*)(MORECORE_FAILURE)) {
-
- /* We do not need, and cannot use, another sbrk call to find end */
- snd_brk = brk + size;
-
- /*
- Record that we no longer have a contiguous sbrk region.
- After the first time mmap is used as backup, we do not
- ever rely on contiguous space since this could incorrectly
- bridge regions.
- */
- set_noncontiguous(av);
- }
- }
- }
-#endif
-
- if (brk != (char*)(MORECORE_FAILURE)) {
- av->sbrked_mem += size;
-
- /*
- If MORECORE extends previous space, we can likewise extend top size.
- */
-
- if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
- set_head(old_top, (size + old_size) | PREV_INUSE);
- }
-
- /*
- Otherwise, make adjustments:
-
- * If the first time through or noncontiguous, we need to call sbrk
- just to find out where the end of memory lies.
-
- * We need to ensure that all returned chunks from malloc will meet
- MALLOC_ALIGNMENT
-
- * If there was an intervening foreign sbrk, we need to adjust sbrk
- request size to account for fact that we will not be able to
- combine new space with existing space in old_top.
-
- * Almost all systems internally allocate whole pages at a time, in
- which case we might as well use the whole last page of request.
- So we allocate enough more memory to hit a page boundary now,
- which in turn causes future contiguous calls to page-align.
- */
-
- else {
- front_misalign = 0;
- end_misalign = 0;
- correction = 0;
- aligned_brk = brk;
-
- /*
- If MORECORE returns an address lower than we have seen before,
- we know it isn't really contiguous. This and some subsequent
- checks help cope with non-conforming MORECORE functions and
- the presence of "foreign" calls to MORECORE from outside of
- malloc or by other threads. We cannot guarantee to detect
- these in all cases, but cope with the ones we do detect.
- */
- if (contiguous(av) && old_size != 0 && brk < old_end) {
- set_noncontiguous(av);
- }
-
- /* handle contiguous cases */
- if (contiguous(av)) {
-
- /*
- We can tolerate forward non-contiguities here (usually due
- to foreign calls) but treat them as part of our space for
- stats reporting.
- */
- if (old_size != 0)
- av->sbrked_mem += brk - old_end;
-
- /* Guarantee alignment of first new chunk made from this space */
-
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0) {
-
- /*
- Skip over some bytes to arrive at an aligned position.
- We don't need to specially mark these wasted front bytes.
- They will never be accessed anyway because
- prev_inuse of av->top (and any chunk created from its start)
- is always true after initialization.
- */
-
- correction = MALLOC_ALIGNMENT - front_misalign;
- aligned_brk += correction;
- }
-
- /*
- If this isn't adjacent to existing space, then we will not
- be able to merge with old_top space, so must add to 2nd request.
- */
-
- correction += old_size;
-
- /* Extend the end address to hit a page boundary */
- end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
- correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
-
- assert(correction >= 0);
- snd_brk = (char*)(MORECORE(correction));
-
- if (snd_brk == (char*)(MORECORE_FAILURE)) {
- /*
- If can't allocate correction, try to at least find out current
- brk. It might be enough to proceed without failing.
- */
- correction = 0;
- snd_brk = (char*)(MORECORE(0));
- }
- else if (snd_brk < brk) {
- /*
- If the second call gives noncontiguous space even though
- it says it won't, the only course of action is to ignore
- results of second call, and conservatively estimate where
- the first call left us. Also set noncontiguous, so this
- won't happen again, leaving at most one hole.
-
- Note that this check is intrinsically incomplete. Because
- MORECORE is allowed to give more space than we ask for,
- there is no reliable way to detect a noncontiguity
- producing a forward gap for the second call.
- */
- snd_brk = brk + size;
- correction = 0;
- set_noncontiguous(av);
- }
-
- }
-
- /* handle non-contiguous cases */
- else {
- /* MORECORE/mmap must correctly align */
- assert(aligned_OK(chunk2mem(brk)));
-
- /* Find out current end of memory */
- if (snd_brk == (char*)(MORECORE_FAILURE)) {
- snd_brk = (char*)(MORECORE(0));
- av->sbrked_mem += snd_brk - brk - size;
- }
- }
-
- /* Adjust top based on results of second sbrk */
- if (snd_brk != (char*)(MORECORE_FAILURE)) {
- av->top = (mchunkptr)aligned_brk;
- set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
- av->sbrked_mem += correction;
-
- /*
- If not the first time through, we either have a
- gap due to foreign sbrk or a non-contiguous region. Insert a
- double fencepost at old_top to prevent consolidation with space
- we don't own. These fenceposts are artificial chunks that are
- marked as inuse and are in any case too small to use. We need
- two to make sizes and alignments work out.
- */
-
- if (old_size != 0) {
- /*
- Shrink old_top to insert fenceposts, keeping size a
- multiple of MALLOC_ALIGNMENT. We know there is at least
- enough space in old_top to do this.
- */
- old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
- set_head(old_top, old_size | PREV_INUSE);
-
- /*
- Note that the following assignments completely overwrite
- old_top when old_size was previously MINSIZE. This is
- intentional. We need the fencepost, even if old_top otherwise gets
- lost.
- */
- chunk_at_offset(old_top, old_size )->size =
- SIZE_SZ|PREV_INUSE;
-
- chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
- SIZE_SZ|PREV_INUSE;
-
- /*
- If possible, release the rest, suppressing trimming.
- */
- if (old_size >= MINSIZE) {
- INTERNAL_SIZE_T tt = av->trim_threshold;
- av->trim_threshold = (INTERNAL_SIZE_T)(-1);
- fREe(chunk2mem(old_top));
- av->trim_threshold = tt;
- }
- }
- }
- }
-
- /* Update statistics */
- sum = av->sbrked_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
- av->max_sbrked_mem = sum;
-
- sum += av->mmapped_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
- av->max_total_mem = sum;
-
- check_malloc_state();
-
- /* finally, do the allocation */
-
- p = av->top;
- size = chunksize(p);
-
- /* check that one of the above allocation paths succeeded */
- if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
- remainder_size = size - nb;
- remainder = chunk_at_offset(p, nb);
- av->top = remainder;
- set_head(p, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- check_malloced_chunk(p, nb);
- return chunk2mem(p);
- }
-
- }
-
- /* catch all failure paths */
- MALLOC_FAILURE_ACTION;
- return 0;
-}
-
-
-
-
-/*
- sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
- to the system (via negative arguments to sbrk) if there is unused
- memory at the `high' end of the malloc pool. It is called
- automatically by free() when top space exceeds the trim
- threshold. It is also called by the public malloc_trim routine. It
- returns 1 if it actually released any memory, else 0.
-*/
-
-#if __STD_C
-static int sYSTRIm(size_t pad, mstate av)
-#else
-static int sYSTRIm(pad, av) size_t pad; mstate av;
-#endif
-{
- long top_size; /* Amount of top-most memory */
- long extra; /* Amount to release */
- long released; /* Amount actually released */
- char* current_brk; /* address returned by pre-check sbrk call */
- char* new_brk; /* address returned by post-check sbrk call */
- size_t pagesz;
-
- pagesz = av->pagesize;
- top_size = chunksize(av->top);
-
- /* Release in pagesize units, keeping at least one page */
- extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
-
- if (extra > 0) {
-
- /*
- Only proceed if end of memory is where we last set it.
- This avoids problems if there were foreign sbrk calls.
- */
- current_brk = (char*)(MORECORE(0));
- if (current_brk == (char*)(av->top) + top_size) {
-
- /*
- Attempt to release memory. We ignore MORECORE return value,
- and instead call again to find out where new end of memory is.
- This avoids problems if first call releases less than we asked,
- or if failure somehow altered the brk value. (We could still
- encounter problems if it altered brk in some very bad way,
- but the only thing we can do is adjust anyway, which will cause
- some downstream failure.)
- */
-
- MORECORE(-extra);
- new_brk = (char*)(MORECORE(0));
-
- if (new_brk != (char*)MORECORE_FAILURE) {
- released = (long)(current_brk - new_brk);
-
- if (released != 0) {
- /* Success. Adjust top. */
- av->sbrked_mem -= released;
- set_head(av->top, (top_size - released) | PREV_INUSE);
- check_malloc_state();
- return 1;
- }
- }
- }
- }
- return 0;
-}
-
-/*
- ------------------------------ malloc ------------------------------
-*/
-
-
-#if __STD_C
-Void_t* mALLOc(size_t bytes)
-#else
- Void_t* mALLOc(bytes) size_t bytes;
-#endif
-{
- mstate av = get_malloc_state();
-
- INTERNAL_SIZE_T nb; /* normalized request size */
- unsigned int idx; /* associated bin index */
- mbinptr bin; /* associated bin */
- mfastbinptr* fb; /* associated fastbin */
-
- mchunkptr victim; /* inspected/selected chunk */
- INTERNAL_SIZE_T size; /* its size */
- int victim_index; /* its bin index */
-
- mchunkptr remainder; /* remainder from a split */
- CHUNK_SIZE_T remainder_size; /* its size */
-
- unsigned int block; /* bit map traverser */
- unsigned int bit; /* bit map traverser */
- unsigned int map; /* current word of binmap */
-
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
-
- /*
- Convert request size to internal form by adding SIZE_SZ bytes
- overhead plus possibly more to obtain necessary alignment and/or
- to obtain a size of at least MINSIZE, the smallest allocatable
- size. Also, checked_request2size traps (returning 0) request sizes
- that are so large that they wrap around zero when padded and
- aligned.
- */
-
- checked_request2size(bytes, nb);
-
- /*
- Bypass search if no frees yet
- */
- if (!have_anychunks(av)) {
- if (av->max_fast == 0) /* initialization check */
- malloc_consolidate(av);
- goto use_top;
- }
-
- /*
- If the size qualifies as a fastbin, first check corresponding bin.
- */
-
- if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
- fb = &(av->fastbins[(fastbin_index(nb))]);
- if ( (victim = *fb) != 0) {
- *fb = victim->fd;
- check_remalloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
-
- /*
- If a small request, check regular bin. Since these "smallbins"
- hold one size each, no searching within bins is necessary.
- (For a large request, we need to wait until unsorted chunks are
- processed to find best fit. But for small ones, fits are exact
- anyway, so we can check now, which is faster.)
- */
-
- if (in_smallbin_range(nb)) {
- idx = smallbin_index(nb);
- bin = bin_at(av,idx);
-
- if ( (victim = last(bin)) != bin) {
- bck = victim->bk;
- set_inuse_bit_at_offset(victim, nb);
- bin->bk = bck;
- bck->fd = bin;
-
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
-
- /*
- If this is a large request, consolidate fastbins before continuing.
- While it might look excessive to kill all fastbins before
- even seeing if there is space available, this avoids
- fragmentation problems normally associated with fastbins.
- Also, in practice, programs tend to have runs of either small or
- large requests, but less often mixtures, so consolidation is not
- invoked all that often in most programs. And the programs that
- it is called frequently in otherwise tend to fragment.
- */
-
- else {
- idx = largebin_index(nb);
- if (have_fastchunks(av))
- malloc_consolidate(av);
- }
-
- /*
- Process recently freed or remaindered chunks, taking one only if
- it is an exact fit, or, if this is a small request, the chunk is the remainder
- from the most recent non-exact fit. Place other traversed chunks in
- bins. Note that this step is the only place in any routine where
- chunks are placed in bins.
- */
-
- while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
- bck = victim->bk;
- size = chunksize(victim);
-
- /*
- If a small request, try to use last remainder if it is the
- only chunk in unsorted bin. This helps promote locality for
- runs of consecutive small requests. This is the only
- exception to best-fit, and applies only when there is
- no exact fit for a small chunk.
- */
-
- if (in_smallbin_range(nb) &&
- bck == unsorted_chunks(av) &&
- victim == av->last_remainder &&
- (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
-
- /* split and reattach remainder */
- remainder_size = size - nb;
- remainder = chunk_at_offset(victim, nb);
- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
- av->last_remainder = remainder;
- remainder->bk = remainder->fd = unsorted_chunks(av);
-
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
-
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /* remove from unsorted list */
- unsorted_chunks(av)->bk = bck;
- bck->fd = unsorted_chunks(av);
-
- /* Take now instead of binning if exact fit */
-
- if (size == nb) {
- set_inuse_bit_at_offset(victim, size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /* place chunk in bin */
-
- if (in_smallbin_range(size)) {
- victim_index = smallbin_index(size);
- bck = bin_at(av, victim_index);
- fwd = bck->fd;
- }
- else {
- victim_index = largebin_index(size);
- bck = bin_at(av, victim_index);
- fwd = bck->fd;
-
- if (fwd != bck) {
- /* if smaller than smallest, place first */
- if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
- fwd = bck;
- bck = bck->bk;
- }
- else if ((CHUNK_SIZE_T)(size) >=
- (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
-
- /* maintain large bins in sorted order */
- size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
- while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
- fwd = fwd->fd;
- bck = fwd->bk;
- }
- }
- }
-
- mark_bin(av, victim_index);
- victim->bk = bck;
- victim->fd = fwd;
- fwd->bk = victim;
- bck->fd = victim;
- }
-
- /*
- If a large request, scan through the chunks of current bin to
- find one that fits. (This will be the smallest that fits unless
- FIRST_SORTED_BIN_SIZE has been changed from default.) This is
- the only step where an unbounded number of chunks might be
- scanned without doing anything useful with them. However the
- lists tend to be short.
- */
-
- if (!in_smallbin_range(nb)) {
- bin = bin_at(av, idx);
-
- for (victim = last(bin); victim != bin; victim = victim->bk) {
- size = chunksize(victim);
-
- if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
- remainder_size = size - nb;
- unlink(victim, bck, fwd);
-
- /* Exhaust */
- if (remainder_size < MINSIZE) {
- set_inuse_bit_at_offset(victim, size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- /* Split */
- else {
- remainder = chunk_at_offset(victim, nb);
- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
- remainder->bk = remainder->fd = unsorted_chunks(av);
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
- }
- }
-
- /*
- Search for a chunk by scanning bins, starting with next largest
- bin. This search is strictly by best-fit; i.e., the smallest
- (with ties going to approximately the least recently used) chunk
- that fits is selected.
-
- The bitmap avoids needing to check that most blocks are nonempty.
- */
-
- ++idx;
- bin = bin_at(av,idx);
- block = idx2block(idx);
- map = av->binmap[block];
- bit = idx2bit(idx);
-
- for (;;) {
-
- /* Skip rest of block if there are no more set bits in this block. */
- if (bit > map || bit == 0) {
- do {
- if (++block >= BINMAPSIZE) /* out of bins */
- goto use_top;
- } while ( (map = av->binmap[block]) == 0);
-
- bin = bin_at(av, (block << BINMAPSHIFT));
- bit = 1;
- }
-
- /* Advance to bin with set bit. There must be one. */
- while ((bit & map) == 0) {
- bin = next_bin(bin);
- bit <<= 1;
- assert(bit != 0);
- }
-
- /* Inspect the bin. It is likely to be non-empty */
- victim = last(bin);
-
- /* If a false alarm (empty bin), clear the bit. */
- if (victim == bin) {
- av->binmap[block] = map &= ~bit; /* Write through */
- bin = next_bin(bin);
- bit <<= 1;
- }
-
- else {
- size = chunksize(victim);
-
- /* We know the first chunk in this bin is big enough to use. */
- assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
-
- remainder_size = size - nb;
-
- /* unlink */
- bck = victim->bk;
- bin->bk = bck;
- bck->fd = bin;
-
- /* Exhaust */
- if (remainder_size < MINSIZE) {
- set_inuse_bit_at_offset(victim, size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /* Split */
- else {
- remainder = chunk_at_offset(victim, nb);
-
- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
- remainder->bk = remainder->fd = unsorted_chunks(av);
- /* advertise as last remainder */
- if (in_smallbin_range(nb))
- av->last_remainder = remainder;
-
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
- }
-
- use_top:
- /*
- If large enough, split off the chunk bordering the end of memory
- (held in av->top). Note that this is in accord with the best-fit
- search rule. In effect, av->top is treated as larger (and thus
- less well fitting) than any other available chunk since it can
- be extended to be as large as necessary (up to system
- limitations).
-
- We require that av->top always exists (i.e., has size >=
- MINSIZE) after initialization, so if it would otherwise be
- exhausted by the current request, it is replenished. (The main
- reason for ensuring it exists is that we may need MINSIZE space
- to put in fenceposts in sysmalloc.)
- */
-
- victim = av->top;
- size = chunksize(victim);
-
- if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
- remainder_size = size - nb;
- remainder = chunk_at_offset(victim, nb);
- av->top = remainder;
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
-
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /*
- If no space in top, relay to handle system-dependent cases
- */
- return sYSMALLOc(nb, av);
-}
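/*
  A minimal usage sketch, assuming the default unprefixed public names
  (malloc/free mapping onto mALLOc/fREe).  A small request is served from
  the bins or from top; a request at or above mmap_threshold (256KB by
  default) is typically handed to mmap by sYSMALLOc above.
*/
#include <stdlib.h>
#include <stdio.h>

int example_small_vs_large(void)
{
  void *small = malloc(48);            /* fastbin/smallbin sized request      */
  void *large = malloc(512 * 1024);    /* likely mmapped when HAVE_MMAP       */

  if (small == NULL || large == NULL) {
    free(small);                       /* free(0) is a no-op, so this is safe */
    free(large);
    return 1;
  }
  printf("small=%p large=%p\n", small, large);
  free(large);                         /* munmapped in fREe if it was mmapped */
  free(small);                         /* typically parked on a fastbin       */
  return 0;
}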
-
-/*
- ------------------------------ free ------------------------------
-*/
-
-#if __STD_C
-void fREe(Void_t* mem)
-#else
-void fREe(mem) Void_t* mem;
-#endif
-{
- mstate av = get_malloc_state();
-
- mchunkptr p; /* chunk corresponding to mem */
- INTERNAL_SIZE_T size; /* its size */
- mfastbinptr* fb; /* associated fastbin */
- mchunkptr nextchunk; /* next contiguous chunk */
- INTERNAL_SIZE_T nextsize; /* its size */
- int nextinuse; /* true if nextchunk is used */
- INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
-
- /* free(0) has no effect */
- if (mem != 0) {
- p = mem2chunk(mem);
- size = chunksize(p);
-
- check_inuse_chunk(p);
-
- /*
- If eligible, place chunk on a fastbin so it can be found
- and used quickly in malloc.
- */
-
- if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
-
-#if TRIM_FASTBINS
- /*
- If TRIM_FASTBINS set, don't place chunks
- bordering top into fastbins
- */
- && (chunk_at_offset(p, size) != av->top)
-#endif
- ) {
-
- set_fastchunks(av);
- fb = &(av->fastbins[fastbin_index(size)]);
- p->fd = *fb;
- *fb = p;
- }
-
- /*
- Consolidate other non-mmapped chunks as they arrive.
- */
-
- else if (!chunk_is_mmapped(p)) {
- set_anychunks(av);
-
- nextchunk = chunk_at_offset(p, size);
- nextsize = chunksize(nextchunk);
-
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = p->prev_size;
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
- }
-
- if (nextchunk != av->top) {
- /* get and clear inuse bit */
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- set_head(nextchunk, nextsize);
-
- /* consolidate forward */
- if (!nextinuse) {
- unlink(nextchunk, bck, fwd);
- size += nextsize;
- }
-
- /*
- Place the chunk in unsorted chunk list. Chunks are
- not placed into regular bins until after they have
- been given one chance to be used in malloc.
- */
-
- bck = unsorted_chunks(av);
- fwd = bck->fd;
- p->bk = bck;
- p->fd = fwd;
- bck->fd = p;
- fwd->bk = p;
-
- set_head(p, size | PREV_INUSE);
- set_foot(p, size);
-
- check_free_chunk(p);
- }
-
- /*
- If the chunk borders the current high end of memory,
- consolidate into top
- */
-
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- check_chunk(p);
- }
-
- /*
- If freeing a large space, consolidate possibly-surrounding
- chunks. Then, if the total unused topmost memory exceeds trim
- threshold, ask malloc_trim to reduce top.
-
- Unless max_fast is 0, we don't know if there are fastbins
- bordering top, so we cannot tell for sure whether threshold
- has been reached unless fastbins are consolidated. But we
- don't want to consolidate on each free. As a compromise,
- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
- is reached.
- */
-
- if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
- if (have_fastchunks(av))
- malloc_consolidate(av);
-
-#ifndef MORECORE_CANNOT_TRIM
- if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
- (CHUNK_SIZE_T)(av->trim_threshold))
- sYSTRIm(av->top_pad, av);
-#endif
- }
-
- }
- /*
- If the chunk was allocated via mmap, release via munmap()
- Note that if HAVE_MMAP is false but chunk_is_mmapped is
- true, then user must have overwritten memory. There's nothing
- we can do to catch this error unless DEBUG is set, in which case
- check_inuse_chunk (above) will have triggered error.
- */
-
- else {
-#if HAVE_MMAP
- int ret;
- INTERNAL_SIZE_T offset = p->prev_size;
- av->n_mmaps--;
- av->mmapped_mem -= (size + offset);
- ret = munmap((char*)p - offset, size + offset);
- /* munmap returns non-zero on failure */
- assert(ret == 0);
-#endif
- }
- }
-}
-
-/*
- ------------------------- malloc_consolidate -------------------------
-
- malloc_consolidate is a specialized version of free() that tears
- down chunks held in fastbins. Free itself cannot be used for this
- purpose since, among other things, it might place chunks back onto
- fastbins. So, instead, we need to use a minor variant of the same
- code.
-
- Also, because this routine needs to be called the first time through
- malloc anyway, it turns out to be the perfect place to trigger
- initialization code.
-*/
-
-#if __STD_C
-static void malloc_consolidate(mstate av)
-#else
-static void malloc_consolidate(av) mstate av;
-#endif
-{
- mfastbinptr* fb; /* current fastbin being consolidated */
- mfastbinptr* maxfb; /* last fastbin (for loop control) */
- mchunkptr p; /* current chunk being consolidated */
- mchunkptr nextp; /* next chunk to consolidate */
- mchunkptr unsorted_bin; /* bin header */
- mchunkptr first_unsorted; /* chunk to link to */
-
- /* These have same use as in free() */
- mchunkptr nextchunk;
- INTERNAL_SIZE_T size;
- INTERNAL_SIZE_T nextsize;
- INTERNAL_SIZE_T prevsize;
- int nextinuse;
- mchunkptr bck;
- mchunkptr fwd;
-
- /*
- If max_fast is 0, we know that av hasn't
- yet been initialized, in which case do so below
- */
-
- if (av->max_fast != 0) {
- clear_fastchunks(av);
-
- unsorted_bin = unsorted_chunks(av);
-
- /*
- Remove each chunk from fast bin and consolidate it, placing it
- then in unsorted bin. Among other reasons for doing this,
- placing in unsorted bin avoids needing to calculate actual bins
- until malloc is sure that chunks aren't immediately going to be
- reused anyway.
- */
-
- maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
- fb = &(av->fastbins[0]);
- do {
- if ( (p = *fb) != 0) {
- *fb = 0;
-
- do {
- check_inuse_chunk(p);
- nextp = p->fd;
-
- /* Slightly streamlined version of consolidation code in free() */
- size = p->size & ~PREV_INUSE;
- nextchunk = chunk_at_offset(p, size);
- nextsize = chunksize(nextchunk);
-
- if (!prev_inuse(p)) {
- prevsize = p->prev_size;
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
- }
-
- if (nextchunk != av->top) {
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- set_head(nextchunk, nextsize);
-
- if (!nextinuse) {
- size += nextsize;
- unlink(nextchunk, bck, fwd);
- }
-
- first_unsorted = unsorted_bin->fd;
- unsorted_bin->fd = p;
- first_unsorted->bk = p;
-
- set_head(p, size | PREV_INUSE);
- p->bk = unsorted_bin;
- p->fd = first_unsorted;
- set_foot(p, size);
- }
-
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- }
-
- } while ( (p = nextp) != 0);
-
- }
- } while (fb++ != maxfb);
- }
- else {
- malloc_init_state(av);
- check_malloc_state();
- }
-}
-
-/*
- ------------------------------ realloc ------------------------------
-*/
-
-
-#if __STD_C
-Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
-#else
-Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
-#endif
-{
- mstate av = get_malloc_state();
-
- INTERNAL_SIZE_T nb; /* padded request size */
-
- mchunkptr oldp; /* chunk corresponding to oldmem */
- INTERNAL_SIZE_T oldsize; /* its size */
-
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- Void_t* newmem; /* corresponding user mem */
-
- mchunkptr next; /* next contiguous chunk after oldp */
-
- mchunkptr remainder; /* extra space at end of newp */
- CHUNK_SIZE_T remainder_size; /* its size */
-
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
-
- CHUNK_SIZE_T copysize; /* bytes to copy */
- unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
- INTERNAL_SIZE_T* s; /* copy source */
- INTERNAL_SIZE_T* d; /* copy destination */
-
-
-#ifdef REALLOC_ZERO_BYTES_FREES
- if (bytes == 0) {
- fREe(oldmem);
- return 0;
- }
-#endif
-
- /* realloc of null is supposed to be same as malloc */
- if (oldmem == 0) return mALLOc(bytes);
-
- checked_request2size(bytes, nb);
-
- oldp = mem2chunk(oldmem);
- oldsize = chunksize(oldp);
-
- check_inuse_chunk(oldp);
-
- if (!chunk_is_mmapped(oldp)) {
-
- if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
- /* already big enough; split below */
- newp = oldp;
- newsize = oldsize;
- }
-
- else {
- next = chunk_at_offset(oldp, oldsize);
-
- /* Try to expand forward into top */
- if (next == av->top &&
- (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
- (CHUNK_SIZE_T)(nb + MINSIZE)) {
- set_head_size(oldp, nb);
- av->top = chunk_at_offset(oldp, nb);
- set_head(av->top, (newsize - nb) | PREV_INUSE);
- return chunk2mem(oldp);
- }
-
- /* Try to expand forward into next chunk; split off remainder below */
- else if (next != av->top &&
- !inuse(next) &&
- (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
- (CHUNK_SIZE_T)(nb)) {
- newp = oldp;
- unlink(next, bck, fwd);
- }
-
- /* allocate, copy, free */
- else {
- newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
- if (newmem == 0)
- return 0; /* propagate failure */
-
- newp = mem2chunk(newmem);
- newsize = chunksize(newp);
-
- /*
- Avoid copy if newp is next chunk after oldp.
- */
- if (newp == next) {
- newsize += oldsize;
- newp = oldp;
- }
- else {
- /*
- Unroll copy of <= 36 bytes (72 if 8byte sizes)
- We know that contents have an odd number of
- INTERNAL_SIZE_T-sized words; minimally 3.
- */
-
- copysize = oldsize - SIZE_SZ;
- s = (INTERNAL_SIZE_T*)(oldmem);
- d = (INTERNAL_SIZE_T*)(newmem);
- ncopies = copysize / sizeof(INTERNAL_SIZE_T);
- assert(ncopies >= 3);
-
- if (ncopies > 9)
- MALLOC_COPY(d, s, copysize);
-
- else {
- *(d+0) = *(s+0);
- *(d+1) = *(s+1);
- *(d+2) = *(s+2);
- if (ncopies > 4) {
- *(d+3) = *(s+3);
- *(d+4) = *(s+4);
- if (ncopies > 6) {
- *(d+5) = *(s+5);
- *(d+6) = *(s+6);
- if (ncopies > 8) {
- *(d+7) = *(s+7);
- *(d+8) = *(s+8);
- }
- }
- }
- }
-
- fREe(oldmem);
- check_inuse_chunk(newp);
- return chunk2mem(newp);
- }
- }
- }
-
- /* If possible, free extra space in old or extended chunk */
-
- assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
-
- remainder_size = newsize - nb;
-
- if (remainder_size < MINSIZE) { /* not enough extra to split off */
- set_head_size(newp, newsize);
- set_inuse_bit_at_offset(newp, newsize);
- }
- else { /* split remainder */
- remainder = chunk_at_offset(newp, nb);
- set_head_size(newp, nb);
- set_head(remainder, remainder_size | PREV_INUSE);
- /* Mark remainder as inuse so free() won't complain */
- set_inuse_bit_at_offset(remainder, remainder_size);
- fREe(chunk2mem(remainder));
- }
-
- check_inuse_chunk(newp);
- return chunk2mem(newp);
- }
-
- /*
- Handle mmap cases
- */
-
- else {
-#if HAVE_MMAP
-
-#if HAVE_MREMAP
- INTERNAL_SIZE_T offset = oldp->prev_size;
- size_t pagemask = av->pagesize - 1;
- char *cp;
- CHUNK_SIZE_T sum;
-
- /* Note the extra SIZE_SZ overhead */
- newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
-
- /* don't need to remap if still within same page */
- if (oldsize == newsize - offset)
- return oldmem;
-
- cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
-
- if (cp != (char*)MORECORE_FAILURE) {
-
- newp = (mchunkptr)(cp + offset);
- set_head(newp, (newsize - offset)|IS_MMAPPED);
-
- assert(aligned_OK(chunk2mem(newp)));
- assert((newp->prev_size == offset));
-
- /* update statistics */
- sum = av->mmapped_mem += newsize - oldsize;
- if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
- av->max_mmapped_mem = sum;
- sum += av->sbrked_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
- av->max_total_mem = sum;
-
- return chunk2mem(newp);
- }
-#endif
-
- /* Note the extra SIZE_SZ overhead. */
- if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
- newmem = oldmem; /* do nothing */
- else {
- /* Must alloc, copy, free. */
- newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
- if (newmem != 0) {
- MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
- fREe(oldmem);
- }
- }
- return newmem;
-
-#else
- /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
- check_malloc_state();
- MALLOC_FAILURE_ACTION;
- return 0;
-#endif
- }
-}
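/*
  A minimal usage sketch, assuming the public name realloc maps onto
  rEALLOc.  Growing in place, expanding into top or the next free chunk,
  and the allocate-copy-free fallback are all handled above; the caller
  only needs the usual "keep the old pointer until realloc succeeds" idiom.
*/
#include <stdlib.h>

static char *append_byte(char *buf, size_t *len, char c)
{
  char *grown = realloc(buf, *len + 1);  /* realloc(NULL, n) behaves as malloc(n) */
  if (grown == NULL)
    return buf;                          /* on failure the old block is untouched */
  grown[*len] = c;
  *len += 1;
  return grown;
}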
-
-/*
- ------------------------------ memalign ------------------------------
-*/
-
-#if __STD_C
-Void_t* mEMALIGn(size_t alignment, size_t bytes)
-#else
-Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
-#endif
-{
- INTERNAL_SIZE_T nb; /* padded request size */
- char* m; /* memory returned by malloc call */
- mchunkptr p; /* corresponding chunk */
- char* brk; /* alignment point within p */
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
- mchunkptr remainder; /* spare room at end to split off */
- CHUNK_SIZE_T remainder_size; /* its size */
- INTERNAL_SIZE_T size;
-
- /* If need less alignment than we give anyway, just relay to malloc */
-
- if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
-
- /* Otherwise, ensure that it is at least a minimum chunk size */
-
- if (alignment < MINSIZE) alignment = MINSIZE;
-
- /* Make sure alignment is power of 2 (in case MINSIZE is not). */
- if ((alignment & (alignment - 1)) != 0) {
- size_t a = MALLOC_ALIGNMENT * 2;
- while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
- alignment = a;
- }
-
- checked_request2size(bytes, nb);
-
- /*
- Strategy: find a spot within that chunk that meets the alignment
- request, and then possibly free the leading and trailing space.
- */
-
-
- /* Call malloc with worst case padding to hit alignment. */
-
- m = (char*)(mALLOc(nb + alignment + MINSIZE));
-
- if (m == 0) return 0; /* propagate failure */
-
- p = mem2chunk(m);
-
- if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
-
- /*
- Find an aligned spot inside chunk. Since we need to give back
- leading space in a chunk of at least MINSIZE, if the first
- calculation places us at a spot with less than MINSIZE leader,
- we can move to the next aligned spot -- we've allocated enough
- total room so that this is always possible.
- */
-
- brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
- -((signed long) alignment)));
- if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
- brk += alignment;
-
- newp = (mchunkptr)brk;
- leadsize = brk - (char*)(p);
- newsize = chunksize(p) - leadsize;
-
- /* For mmapped chunks, just adjust offset */
- if (chunk_is_mmapped(p)) {
- newp->prev_size = p->prev_size + leadsize;
- set_head(newp, newsize|IS_MMAPPED);
- return chunk2mem(newp);
- }
-
- /* Otherwise, give back leader, use the rest */
- set_head(newp, newsize | PREV_INUSE);
- set_inuse_bit_at_offset(newp, newsize);
- set_head_size(p, leadsize);
- fREe(chunk2mem(p));
- p = newp;
-
- assert (newsize >= nb &&
- (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
- }
-
- /* Also give back spare room at the end */
- if (!chunk_is_mmapped(p)) {
- size = chunksize(p);
- if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
- remainder_size = size - nb;
- remainder = chunk_at_offset(p, nb);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_head_size(p, nb);
- fREe(chunk2mem(remainder));
- }
- }
-
- check_inuse_chunk(p);
- return chunk2mem(p);
-}
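/*
  A minimal usage sketch, assuming the public name memalign maps onto
  mEMALIGn; the prototype is declared here only for the sketch.  The code
  above rounds the alignment up to a power of two and to at least MINSIZE.
*/
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

extern void *memalign(size_t alignment, size_t bytes);

void example_memalign(void)
{
  void *p = memalign(64, 1000);          /* ask for 64-byte alignment   */
  if (p != NULL) {
    assert(((uintptr_t) p % 64) == 0);   /* returned address is aligned */
    free(p);
  }
}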
-
-/*
- ------------------------------ calloc ------------------------------
-*/
-
-#if __STD_C
-Void_t* cALLOc(size_t n_elements, size_t elem_size)
-#else
-Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
-#endif
-{
- mchunkptr p;
- CHUNK_SIZE_T clearsize;
- CHUNK_SIZE_T nclears;
- INTERNAL_SIZE_T* d;
-
- Void_t* mem = mALLOc(n_elements * elem_size);
-
- if (mem != 0) {
- p = mem2chunk(mem);
-
- if (!chunk_is_mmapped(p))
- {
- /*
- Unroll clear of <= 36 bytes (72 if 8byte sizes)
- We know that contents have an odd number of
- INTERNAL_SIZE_T-sized words; minimally 3.
- */
-
- d = (INTERNAL_SIZE_T*)mem;
- clearsize = chunksize(p) - SIZE_SZ;
- nclears = clearsize / sizeof(INTERNAL_SIZE_T);
- assert(nclears >= 3);
-
- if (nclears > 9)
- MALLOC_ZERO(d, clearsize);
-
- else {
- *(d+0) = 0;
- *(d+1) = 0;
- *(d+2) = 0;
- if (nclears > 4) {
- *(d+3) = 0;
- *(d+4) = 0;
- if (nclears > 6) {
- *(d+5) = 0;
- *(d+6) = 0;
- if (nclears > 8) {
- *(d+7) = 0;
- *(d+8) = 0;
- }
- }
- }
- }
- }
-#if ! MMAP_CLEARS
- else
- {
- d = (INTERNAL_SIZE_T*)mem;
- /*
- Note the additional SIZE_SZ
- */
- clearsize = chunksize(p) - 2*SIZE_SZ;
- MALLOC_ZERO(d, clearsize);
- }
-#endif
- }
- return mem;
-}
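/*
  A cautionary sketch: as written above, cALLOc computes
  n_elements * elem_size without an overflow check, so a caller that takes
  both counts from untrusted input may want to guard the product itself.
  The wrapper name is hypothetical.
*/
#include <stdlib.h>

void *checked_calloc(size_t n, size_t size)
{
  if (size != 0 && n > (size_t) -1 / size)
    return NULL;                 /* n * size would wrap around */
  return calloc(n, size);
}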
-
-/*
- ------------------------------ cfree ------------------------------
-*/
-
-#if __STD_C
-void cFREe(Void_t *mem)
-#else
-void cFREe(mem) Void_t *mem;
-#endif
-{
- fREe(mem);
-}
-
-/*
- ------------------------- independent_calloc -------------------------
-*/
-
-#if __STD_C
-Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
-#else
-Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
-#endif
-{
- size_t sz = elem_size; /* serves as 1-element array */
- /* opts arg of 3 means all elements are same size, and should be cleared */
- return iALLOc(n_elements, &sz, 3, chunks);
-}
-
-/*
- ------------------------- independent_comalloc -------------------------
-*/
-
-#if __STD_C
-Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
-#else
-Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
-#endif
-{
- return iALLOc(n_elements, sizes, 0, chunks);
-}
-
-
-/*
- ------------------------------ ialloc ------------------------------
- ialloc provides common support for independent_X routines, handling all of
- the combinations that can result.
-
- The opts arg has:
- bit 0 set if all elements are same size (using sizes[0])
- bit 1 set if elements should be zeroed
-*/
-
-
-#if __STD_C
-static Void_t** iALLOc(size_t n_elements,
- size_t* sizes,
- int opts,
- Void_t* chunks[])
-#else
-static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
-#endif
-{
- mstate av = get_malloc_state();
- INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
- INTERNAL_SIZE_T contents_size; /* total size of elements */
- INTERNAL_SIZE_T array_size; /* request size of pointer array */
- Void_t* mem; /* malloced aggregate space */
- mchunkptr p; /* corresponding chunk */
- INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
- Void_t** marray; /* either "chunks" or malloced ptr array */
- mchunkptr array_chunk; /* chunk for malloced ptr array */
- int mmx; /* to disable mmap */
- INTERNAL_SIZE_T size;
- size_t i;
-
- /* Ensure initialization */
- if (av->max_fast == 0) malloc_consolidate(av);
-
- /* compute array length, if needed */
- if (chunks != 0) {
- if (n_elements == 0)
- return chunks; /* nothing to do */
- marray = chunks;
- array_size = 0;
- }
- else {
- /* if empty req, must still return chunk representing empty array */
- if (n_elements == 0)
- return (Void_t**) mALLOc(0);
- marray = 0;
- array_size = request2size(n_elements * (sizeof(Void_t*)));
- }
-
- /* compute total element size */
- if (opts & 0x1) { /* all-same-size */
- element_size = request2size(*sizes);
- contents_size = n_elements * element_size;
- }
- else { /* add up all the sizes */
- element_size = 0;
- contents_size = 0;
- for (i = 0; i != n_elements; ++i)
- contents_size += request2size(sizes[i]);
- }
-
- /* subtract out alignment bytes from total to minimize overallocation */
- size = contents_size + array_size - MALLOC_ALIGN_MASK;
-
- /*
- Allocate the aggregate chunk.
- But first disable mmap so malloc won't use it, since
- we would not be able to later free/realloc space internal
- to a segregated mmap region.
- */
- mmx = av->n_mmaps_max; /* disable mmap */
- av->n_mmaps_max = 0;
- mem = mALLOc(size);
- av->n_mmaps_max = mmx; /* reset mmap */
- if (mem == 0)
- return 0;
-
- p = mem2chunk(mem);
- assert(!chunk_is_mmapped(p));
- remainder_size = chunksize(p);
-
- if (opts & 0x2) { /* optionally clear the elements */
- MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
- }
-
- /* If not provided, allocate the pointer array as final part of chunk */
- if (marray == 0) {
- array_chunk = chunk_at_offset(p, contents_size);
- marray = (Void_t**) (chunk2mem(array_chunk));
- set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
- remainder_size = contents_size;
- }
-
- /* split out elements */
- for (i = 0; ; ++i) {
- marray[i] = chunk2mem(p);
- if (i != n_elements-1) {
- if (element_size != 0)
- size = element_size;
- else
- size = request2size(sizes[i]);
- remainder_size -= size;
- set_head(p, size | PREV_INUSE);
- p = chunk_at_offset(p, size);
- }
- else { /* the final element absorbs any overallocation slop */
- set_head(p, remainder_size | PREV_INUSE);
- break;
- }
- }
-
-#if DEBUG
- if (marray != chunks) {
- /* final element must have exactly exhausted chunk */
- if (element_size != 0)
- assert(remainder_size == element_size);
- else
- assert(remainder_size == request2size(sizes[i]));
- check_inuse_chunk(mem2chunk(marray));
- }
-
- for (i = 0; i != n_elements; ++i)
- check_inuse_chunk(mem2chunk(marray[i]));
-#endif
-
- return marray;
-}
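/*
  A minimal usage sketch, assuming the conventional public names
  independent_calloc/independent_comalloc map onto iCALLOc/iCOMALLOc; the
  prototype is declared here only for the sketch.  Both return an array of
  element pointers carved out of one aggregate chunk; each element may
  later be passed to free() individually.
*/
#include <stdlib.h>

struct node { struct node *next; int value; };

extern void **independent_calloc(size_t n_elements, size_t elem_size,
                                 void *chunks[]);

void **example_node_pool(size_t n)
{
  /* n zeroed nodes, plus the pointer array, from a single allocation */
  void **elems = independent_calloc(n, sizeof(struct node), NULL);
  return elems;   /* elems[0..n-1] are ready to use (or NULL on failure) */
}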
-
-
-/*
- ------------------------------ valloc ------------------------------
-*/
-
-#if __STD_C
-Void_t* vALLOc(size_t bytes)
-#else
-Void_t* vALLOc(bytes) size_t bytes;
-#endif
-{
- /* Ensure initialization */
- mstate av = get_malloc_state();
- if (av->max_fast == 0) malloc_consolidate(av);
- return mEMALIGn(av->pagesize, bytes);
-}
-
-/*
- ------------------------------ pvalloc ------------------------------
-*/
-
-
-#if __STD_C
-Void_t* pVALLOc(size_t bytes)
-#else
-Void_t* pVALLOc(bytes) size_t bytes;
-#endif
-{
- mstate av = get_malloc_state();
- size_t pagesz;
-
- /* Ensure initialization */
- if (av->max_fast == 0) malloc_consolidate(av);
- pagesz = av->pagesize;
- return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
-}
-
-
-/*
- ------------------------------ malloc_trim ------------------------------
-*/
-
-#if __STD_C
-int mTRIm(size_t pad)
-#else
-int mTRIm(pad) size_t pad;
-#endif
-{
- mstate av = get_malloc_state();
- /* Ensure initialization/consolidation */
- malloc_consolidate(av);
-
-#ifndef MORECORE_CANNOT_TRIM
- return sYSTRIm(pad, av);
-#else
- return 0;
-#endif
-}
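/*
  A minimal usage sketch, assuming the public name malloc_trim maps onto
  mTRIm; the prototype is declared here only for the sketch.  After a phase
  that frees a lot of memory, the caller can ask for unused space at the
  top of the heap to be returned to the system.
*/
#include <stddef.h>

extern int malloc_trim(size_t pad);

void example_trim(void)
{
  if (malloc_trim(0)) {
    /* returned 1: some whole pages were handed back via MORECORE */
  }
}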
-
-
-/*
- ------------------------- malloc_usable_size -------------------------
-*/
-
-#if __STD_C
-size_t mUSABLe(Void_t* mem)
-#else
-size_t mUSABLe(mem) Void_t* mem;
-#endif
-{
- mchunkptr p;
- if (mem != 0) {
- p = mem2chunk(mem);
- if (chunk_is_mmapped(p))
- return chunksize(p) - 2*SIZE_SZ;
- else if (inuse(p))
- return chunksize(p) - SIZE_SZ;
- }
- return 0;
-}
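/*
  A minimal usage sketch, assuming the public name malloc_usable_size maps
  onto mUSABLe; the prototype is declared here only for the sketch.  The
  returned value may exceed the original request because of alignment and
  minimum-chunk padding.
*/
#include <stdlib.h>
#include <assert.h>

extern size_t malloc_usable_size(void *mem);

void example_usable_size(void)
{
  void *p = malloc(13);
  if (p != NULL) {
    assert(malloc_usable_size(p) >= 13);   /* padded up to a whole chunk */
    free(p);
  }
}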
-
-/*
- ------------------------------ mallinfo ------------------------------
-*/
-
-struct mallinfo mALLINFo()
-{
- mstate av = get_malloc_state();
- struct mallinfo mi;
- int i;
- mbinptr b;
- mchunkptr p;
- INTERNAL_SIZE_T avail;
- INTERNAL_SIZE_T fastavail;
- int nblocks;
- int nfastblocks;
-
- /* Ensure initialization */
- if (av->top == 0) malloc_consolidate(av);
-
- check_malloc_state();
-
- /* Account for top */
- avail = chunksize(av->top);
- nblocks = 1; /* top always exists */
-
- /* traverse fastbins */
- nfastblocks = 0;
- fastavail = 0;
-
- for (i = 0; i < NFASTBINS; ++i) {
- for (p = av->fastbins[i]; p != 0; p = p->fd) {
- ++nfastblocks;
- fastavail += chunksize(p);
- }
- }
-
- avail += fastavail;
-
- /* traverse regular bins */
- for (i = 1; i < NBINS; ++i) {
- b = bin_at(av, i);
- for (p = last(b); p != b; p = p->bk) {
- ++nblocks;
- avail += chunksize(p);
- }
- }
-
- mi.smblks = nfastblocks;
- mi.ordblks = nblocks;
- mi.fordblks = avail;
- mi.uordblks = av->sbrked_mem - avail;
- mi.arena = av->sbrked_mem;
- mi.hblks = av->n_mmaps;
- mi.hblkhd = av->mmapped_mem;
- mi.fsmblks = fastavail;
- mi.keepcost = chunksize(av->top);
- mi.usmblks = av->max_total_mem;
- return mi;
-}
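/*
  A minimal usage sketch, assuming the public name mallinfo maps onto
  mALLINFo and that struct mallinfo is visible as declared earlier in this
  file (or via <malloc.h> on glibc-style systems).  arena is total sbrked
  space, uordblks the part currently in use, fordblks the part sitting free
  in bins and top, hblkhd the space held in mmapped regions.
*/
#include <stdio.h>
#include <malloc.h>

void example_mallinfo(void)
{
  struct mallinfo mi = mallinfo();
  printf("sbrked=%d in-use=%d free=%d mmapped=%d\n",
         mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd);
}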
-
-/*
- ------------------------------ malloc_stats ------------------------------
-*/
-
-void mSTATs()
-{
- struct mallinfo mi = mALLINFo();
-
-#ifdef WIN32
- {
- CHUNK_SIZE_T free, reserved, committed;
- vminfo (&free, &reserved, &committed);
- fprintf(stderr, "free bytes = %10lu\n",
- free);
- fprintf(stderr, "reserved bytes = %10lu\n",
- reserved);
- fprintf(stderr, "committed bytes = %10lu\n",
- committed);
- }
-#endif
-
-
- fprintf(stderr, "max system bytes = %10lu\n",
- (CHUNK_SIZE_T)(mi.usmblks));
- fprintf(stderr, "system bytes = %10lu\n",
- (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
- fprintf(stderr, "in use bytes = %10lu\n",
- (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
-
-#ifdef WIN32
- {
- CHUNK_SIZE_T kernel, user;
- if (cpuinfo (TRUE, &kernel, &user)) {
- fprintf(stderr, "kernel ms = %10lu\n",
- kernel);
- fprintf(stderr, "user ms = %10lu\n",
- user);
- }
- }
-#endif
-}
-
-
-/*
- ------------------------------ mallopt ------------------------------
-*/
-
-#if __STD_C
-int mALLOPt(int param_number, int value)
-#else
-int mALLOPt(param_number, value) int param_number; int value;
-#endif
-{
- mstate av = get_malloc_state();
- /* Ensure initialization/consolidation */
- malloc_consolidate(av);
-
- switch(param_number) {
- case M_MXFAST:
- if (value >= 0 && value <= MAX_FAST_SIZE) {
- set_max_fast(av, value);
- return 1;
- }
- else
- return 0;
-
- case M_TRIM_THRESHOLD:
- av->trim_threshold = value;
- return 1;
-
- case M_TOP_PAD:
- av->top_pad = value;
- return 1;
-
- case M_MMAP_THRESHOLD:
- av->mmap_threshold = value;
- return 1;
-
- case M_MMAP_MAX:
-#if !HAVE_MMAP
- if (value != 0)
- return 0;
-#endif
- av->n_mmaps_max = value;
- return 1;
-
- default:
- return 0;
- }
-}
-
-
-/*
- -------------------- Alternative MORECORE functions --------------------
-*/
-
-
-/*
- General Requirements for MORECORE.
-
- The MORECORE function must have the following properties:
-
- If MORECORE_CONTIGUOUS is false:
-
- * MORECORE must allocate in multiples of pagesize. It will
- only be called with arguments that are multiples of pagesize.
-
- * MORECORE(0) must return an address that is at least
- MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
-
- else (i.e. If MORECORE_CONTIGUOUS is true):
-
- * Consecutive calls to MORECORE with positive arguments
- return increasing addresses, indicating that space has been
- contiguously extended.
-
- * MORECORE need not allocate in multiples of pagesize.
- Calls to MORECORE need not have args of multiples of pagesize.
-
- * MORECORE need not page-align.
-
- In either case:
-
- * MORECORE may allocate more memory than requested. (Or even less,
- but this will generally result in a malloc failure.)
-
- * MORECORE must not allocate memory when given argument zero, but
- instead return one past the end address of memory from previous
- nonzero call. This malloc does NOT call MORECORE(0)
- until at least one call with positive arguments is made, so
- the initial value returned is not important.
-
- * Even though consecutive calls to MORECORE need not return contiguous
- addresses, it must be OK for malloc'ed chunks to span multiple
- regions in those cases where they do happen to be contiguous.
-
- * MORECORE need not handle negative arguments -- it may instead
- just return MORECORE_FAILURE when given negative arguments.
- Negative arguments are always multiples of pagesize. MORECORE
- must not misinterpret negative args as large positive unsigned
- args. You can suppress all such calls from even occurring by defining
-  MORECORE_CANNOT_TRIM.
-
- There is some variation across systems about the type of the
- argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
- actually be size_t, because sbrk supports negative args, so it is
- normally the signed type of the same width as size_t (sometimes
- declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
- matter though. Internally, we use "long" as arguments, which should
- work across all reasonable possibilities.
-
- Additionally, if MORECORE ever returns failure for a positive
- request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
- system allocator. This is a useful backup strategy for systems with
- holes in address spaces -- in this case sbrk cannot contiguously
- expand the heap, but mmap may be able to map noncontiguous space.
-
- If you'd like mmap to ALWAYS be used, you can define MORECORE to be
- a function that always returns MORECORE_FAILURE.
-
- Malloc only has limited ability to detect failures of MORECORE
- to supply contiguous space when it says it can. In particular,
- multithreaded programs that do not use locks may result in
-  race conditions across calls to MORECORE that result in gaps
- that cannot be detected as such, and subsequent corruption.
-
- If you are using this malloc with something other than sbrk (or its
- emulation) to supply memory regions, you probably want to set
- MORECORE_CONTIGUOUS as false. As an example, here is a custom
- allocator kindly contributed for pre-OSX macOS. It uses virtually
- but not necessarily physically contiguous non-paged memory (locked
- in, present and won't get swapped out). You can use it by
- uncommenting this section, adding some #includes, and setting up the
- appropriate defines above:
-
- #define MORECORE osMoreCore
- #define MORECORE_CONTIGUOUS 0
-
- There is also a shutdown routine that should somehow be called for
- cleanup upon program exit.
-
- #define MAX_POOL_ENTRIES 100
- #define MINIMUM_MORECORE_SIZE (64 * 1024)
- static int next_os_pool;
- void *our_os_pools[MAX_POOL_ENTRIES];
-
- void *osMoreCore(int size)
- {
- void *ptr = 0;
- static void *sbrk_top = 0;
-
- if (size > 0)
- {
- if (size < MINIMUM_MORECORE_SIZE)
- size = MINIMUM_MORECORE_SIZE;
- if (CurrentExecutionLevel() == kTaskLevel)
- ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
- if (ptr == 0)
- {
- return (void *) MORECORE_FAILURE;
- }
- // save ptrs so they can be freed during cleanup
- our_os_pools[next_os_pool] = ptr;
- next_os_pool++;
- ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
- sbrk_top = (char *) ptr + size;
- return ptr;
- }
- else if (size < 0)
- {
- // we don't currently support shrink behavior
- return (void *) MORECORE_FAILURE;
- }
- else
- {
- return sbrk_top;
- }
- }
-
- // cleanup any allocated memory pools
- // called as last thing before shutting down driver
-
- void osCleanupMem(void)
- {
- void **ptr;
-
- for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
- if (*ptr)
- {
- PoolDeallocate(*ptr);
- *ptr = 0;
- }
- }
-
-*/
-
-
-/*
- --------------------------------------------------------------
-
- Emulation of sbrk for win32.
- Donated by J. Walter <Walter@GeNeSys-e.de>.
- For additional information about this code, and malloc on Win32, see
- http://www.genesys-e.de/jwalter/
-*/
-
-
-#ifdef WIN32
-
-#ifdef _DEBUG
-/* #define TRACE */
-#endif
-
-/* Support for USE_MALLOC_LOCK */
-#ifdef USE_MALLOC_LOCK
-
-/* Wait for spin lock */
-static int slwait (int *sl) {
- while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
- Sleep (0);
- return 0;
-}
-
-/* Release spin lock */
-static int slrelease (int *sl) {
- InterlockedExchange (sl, 0);
- return 0;
-}
-
-#ifdef NEEDED
-/* Spin lock for emulation code */
-static int g_sl;
-#endif
-
-#endif /* USE_MALLOC_LOCK */
-
-/* getpagesize for windows */
-static long getpagesize (void) {
- static long g_pagesize = 0;
- if (! g_pagesize) {
- SYSTEM_INFO system_info;
- GetSystemInfo (&system_info);
- g_pagesize = system_info.dwPageSize;
- }
- return g_pagesize;
-}
-static long getregionsize (void) {
- static long g_regionsize = 0;
- if (! g_regionsize) {
- SYSTEM_INFO system_info;
- GetSystemInfo (&system_info);
- g_regionsize = system_info.dwAllocationGranularity;
- }
- return g_regionsize;
-}
-
-/* A region list entry */
-typedef struct _region_list_entry {
- void *top_allocated;
- void *top_committed;
- void *top_reserved;
- long reserve_size;
- struct _region_list_entry *previous;
-} region_list_entry;
-
-/* Allocate and link a region entry in the region list */
-static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
- region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
- if (! next)
- return FALSE;
- next->top_allocated = (char *) base_reserved;
- next->top_committed = (char *) base_reserved;
- next->top_reserved = (char *) base_reserved + reserve_size;
- next->reserve_size = reserve_size;
- next->previous = *last;
- *last = next;
- return TRUE;
-}
-/* Free and unlink the last region entry from the region list */
-static int region_list_remove (region_list_entry **last) {
- region_list_entry *previous = (*last)->previous;
- if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last))
- return FALSE;
- *last = previous;
- return TRUE;
-}
-
-#define CEIL(size,to) (((size)+(to)-1)&~((to)-1))
-#define FLOOR(size,to) ((size)&~((to)-1))
-
-#define SBRK_SCALE 0
-/* #define SBRK_SCALE 1 */
-/* #define SBRK_SCALE 2 */
-/* #define SBRK_SCALE 4 */
-
-/* sbrk for windows */
-static void *sbrk (long size) {
- static long g_pagesize, g_my_pagesize;
- static long g_regionsize, g_my_regionsize;
- static region_list_entry *g_last;
- void *result = (void *) MORECORE_FAILURE;
-#ifdef TRACE
- printf ("sbrk %d\n", size);
-#endif
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Wait for spin lock */
- slwait (&g_sl);
-#endif
- /* First time initialization */
- if (! g_pagesize) {
- g_pagesize = getpagesize ();
- g_my_pagesize = g_pagesize << SBRK_SCALE;
- }
- if (! g_regionsize) {
- g_regionsize = getregionsize ();
- g_my_regionsize = g_regionsize << SBRK_SCALE;
- }
- if (! g_last) {
- if (! region_list_append (&g_last, 0, 0))
- goto sbrk_exit;
- }
- /* Assert invariants */
- assert (g_last);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
- g_last->top_allocated <= g_last->top_committed);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
- g_last->top_committed <= g_last->top_reserved &&
- (unsigned) g_last->top_committed % g_pagesize == 0);
- assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
- assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
- /* Allocation requested? */
- if (size >= 0) {
- /* Allocation size is the requested size */
- long allocate_size = size;
- /* Compute the size to commit */
- long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
- /* Do we reach the commit limit? */
- if (to_commit > 0) {
- /* Round size to commit */
- long commit_size = CEIL (to_commit, g_my_pagesize);
- /* Compute the size to reserve */
- long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
- /* Do we reach the reserve limit? */
- if (to_reserve > 0) {
- /* Compute the remaining size to commit in the current region */
- long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
- if (remaining_commit_size > 0) {
- /* Assert preconditions */
- assert ((unsigned) g_last->top_committed % g_pagesize == 0);
- assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
- /* Commit this */
- void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
- MEM_COMMIT, PAGE_READWRITE);
- /* Check returned pointer for consistency */
- if (base_committed != g_last->top_committed)
- goto sbrk_exit;
- /* Assert postconditions */
- assert ((unsigned) base_committed % g_pagesize == 0);
-#ifdef TRACE
- printf ("Commit %p %d\n", base_committed, remaining_commit_size);
-#endif
- /* Adjust the regions commit top */
- g_last->top_committed = (char *) base_committed + remaining_commit_size;
- }
- } {
- /* Now we are going to search and reserve. */
- int contiguous = -1;
- int found = FALSE;
- MEMORY_BASIC_INFORMATION memory_info;
- void *base_reserved;
- long reserve_size;
- do {
- /* Assume contiguous memory */
- contiguous = TRUE;
- /* Round size to reserve */
- reserve_size = CEIL (to_reserve, g_my_regionsize);
- /* Start with the current region's top */
- memory_info.BaseAddress = g_last->top_reserved;
- /* Assert preconditions */
- assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
- assert (0 < reserve_size && reserve_size % g_regionsize == 0);
- while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
- /* Assert postconditions */
- assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
-#ifdef TRACE
- printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
- memory_info.State == MEM_FREE ? "FREE":
- (memory_info.State == MEM_RESERVE ? "RESERVED":
- (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
-#endif
- /* Region is free, well aligned and big enough: we are done */
- if (memory_info.State == MEM_FREE &&
- (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
- memory_info.RegionSize >= (unsigned) reserve_size) {
- found = TRUE;
- break;
- }
- /* From now on we can't get contiguous memory! */
- contiguous = FALSE;
- /* Recompute size to reserve */
- reserve_size = CEIL (allocate_size, g_my_regionsize);
- memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
- /* Assert preconditions */
- assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
- assert (0 < reserve_size && reserve_size % g_regionsize == 0);
- }
- /* Search failed? */
- if (! found)
- goto sbrk_exit;
- /* Assert preconditions */
- assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
- assert (0 < reserve_size && reserve_size % g_regionsize == 0);
- /* Try to reserve this */
- base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
- MEM_RESERVE, PAGE_NOACCESS);
- if (! base_reserved) {
- int rc = GetLastError ();
- if (rc != ERROR_INVALID_ADDRESS)
- goto sbrk_exit;
- }
- /* A null pointer signals (hopefully) a race condition with another thread. */
- /* In this case, we try again. */
- } while (! base_reserved);
- /* Check returned pointer for consistency */
- if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
- goto sbrk_exit;
- /* Assert postconditions */
- assert ((unsigned) base_reserved % g_regionsize == 0);
-#ifdef TRACE
- printf ("Reserve %p %d\n", base_reserved, reserve_size);
-#endif
- /* Did we get contiguous memory? */
- if (contiguous) {
- long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
- /* Adjust allocation size */
- allocate_size -= start_size;
- /* Adjust the regions allocation top */
- g_last->top_allocated = g_last->top_committed;
- /* Recompute the size to commit */
- to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
- /* Round size to commit */
- commit_size = CEIL (to_commit, g_my_pagesize);
- }
- /* Append the new region to the list */
- if (! region_list_append (&g_last, base_reserved, reserve_size))
- goto sbrk_exit;
- /* Didn't we get contiguous memory? */
- if (! contiguous) {
- /* Recompute the size to commit */
- to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
- /* Round size to commit */
- commit_size = CEIL (to_commit, g_my_pagesize);
- }
- }
- }
- /* Assert preconditions */
- assert ((unsigned) g_last->top_committed % g_pagesize == 0);
- assert (0 < commit_size && commit_size % g_pagesize == 0); {
- /* Commit this */
- void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
- MEM_COMMIT, PAGE_READWRITE);
- /* Check returned pointer for consistency */
- if (base_committed != g_last->top_committed)
- goto sbrk_exit;
- /* Assert postconditions */
- assert ((unsigned) base_committed % g_pagesize == 0);
-#ifdef TRACE
- printf ("Commit %p %d\n", base_committed, commit_size);
-#endif
- /* Adjust the regions commit top */
- g_last->top_committed = (char *) base_committed + commit_size;
- }
- }
- /* Adjust the regions allocation top */
- g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
- result = (char *) g_last->top_allocated - size;
- /* Deallocation requested? */
- } else if (size < 0) {
- long deallocate_size = - size;
- /* As long as we have a region to release */
- while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
- /* Get the size to release */
- long release_size = g_last->reserve_size;
- /* Get the base address */
- void *base_reserved = (char *) g_last->top_reserved - release_size;
- /* Assert preconditions */
- assert ((unsigned) base_reserved % g_regionsize == 0);
- assert (0 < release_size && release_size % g_regionsize == 0); {
- /* Release this */
- int rc = VirtualFree (base_reserved, 0,
- MEM_RELEASE);
- /* Check returned code for consistency */
- if (! rc)
- goto sbrk_exit;
-#ifdef TRACE
- printf ("Release %p %d\n", base_reserved, release_size);
-#endif
- }
- /* Adjust deallocation size */
- deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
- /* Remove the old region from the list */
- if (! region_list_remove (&g_last))
- goto sbrk_exit;
- } {
- /* Compute the size to decommit */
- long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
- if (to_decommit >= g_my_pagesize) {
- /* Compute the size to decommit */
- long decommit_size = FLOOR (to_decommit, g_my_pagesize);
- /* Compute the base address */
- void *base_committed = (char *) g_last->top_committed - decommit_size;
- /* Assert preconditions */
- assert ((unsigned) base_committed % g_pagesize == 0);
- assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
- /* Decommit this */
- int rc = VirtualFree ((char *) base_committed, decommit_size,
- MEM_DECOMMIT);
- /* Check returned code for consistency */
- if (! rc)
- goto sbrk_exit;
-#ifdef TRACE
- printf ("Decommit %p %d\n", base_committed, decommit_size);
-#endif
- }
- /* Adjust deallocation size and regions commit and allocate top */
- deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
- g_last->top_committed = base_committed;
- g_last->top_allocated = base_committed;
- }
- }
- /* Adjust regions allocate top */
- g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
- /* Check for underflow */
- if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
- g_last->top_allocated > g_last->top_committed) {
- /* Adjust regions allocate top */
- g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
- goto sbrk_exit;
- }
- result = g_last->top_allocated;
- }
- /* Assert invariants */
- assert (g_last);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
- g_last->top_allocated <= g_last->top_committed);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
- g_last->top_committed <= g_last->top_reserved &&
- (unsigned) g_last->top_committed % g_pagesize == 0);
- assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
- assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
-
-sbrk_exit:
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Release spin lock */
- slrelease (&g_sl);
-#endif
- return result;
-}
-
-/* mmap for windows */
-static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
- static long g_pagesize;
- static long g_regionsize;
-#ifdef TRACE
- printf ("mmap %d\n", size);
-#endif
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Wait for spin lock */
- slwait (&g_sl);
-#endif
- /* First time initialization */
- if (! g_pagesize)
- g_pagesize = getpagesize ();
- if (! g_regionsize)
- g_regionsize = getregionsize ();
- /* Assert preconditions */
- assert ((unsigned) ptr % g_regionsize == 0);
- assert (size % g_pagesize == 0);
- /* Allocate this */
- ptr = VirtualAlloc (ptr, size,
- MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
- if (! ptr) {
- ptr = (void *) MORECORE_FAILURE;
- goto mmap_exit;
- }
- /* Assert postconditions */
- assert ((unsigned) ptr % g_regionsize == 0);
-#ifdef TRACE
- printf ("Commit %p %d\n", ptr, size);
-#endif
-mmap_exit:
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Release spin lock */
- slrelease (&g_sl);
-#endif
- return ptr;
-}
-
-/* munmap for windows */
-static long munmap (void *ptr, long size) {
- static long g_pagesize;
- static long g_regionsize;
- int rc = MUNMAP_FAILURE;
-#ifdef TRACE
- printf ("munmap %p %d\n", ptr, size);
-#endif
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Wait for spin lock */
- slwait (&g_sl);
-#endif
- /* First time initialization */
- if (! g_pagesize)
- g_pagesize = getpagesize ();
- if (! g_regionsize)
- g_regionsize = getregionsize ();
- /* Assert preconditions */
- assert ((unsigned) ptr % g_regionsize == 0);
- assert (size % g_pagesize == 0);
- /* Free this */
- if (! VirtualFree (ptr, 0,
- MEM_RELEASE))
- goto munmap_exit;
- rc = 0;
-#ifdef TRACE
- printf ("Release %p %d\n", ptr, size);
-#endif
-munmap_exit:
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Release spin lock */
- slrelease (&g_sl);
-#endif
- return rc;
-}
-
-static void vminfo (CHUNK_SIZE_T *free, CHUNK_SIZE_T *reserved, CHUNK_SIZE_T *committed) {
- MEMORY_BASIC_INFORMATION memory_info;
- memory_info.BaseAddress = 0;
- *free = *reserved = *committed = 0;
- while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
- switch (memory_info.State) {
- case MEM_FREE:
- *free += memory_info.RegionSize;
- break;
- case MEM_RESERVE:
- *reserved += memory_info.RegionSize;
- break;
- case MEM_COMMIT:
- *committed += memory_info.RegionSize;
- break;
- }
- memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
- }
-}
-
-static int cpuinfo (int whole, CHUNK_SIZE_T *kernel, CHUNK_SIZE_T *user) {
- if (whole) {
- __int64 creation64, exit64, kernel64, user64;
- int rc = GetProcessTimes (GetCurrentProcess (),
- (FILETIME *) &creation64,
- (FILETIME *) &exit64,
- (FILETIME *) &kernel64,
- (FILETIME *) &user64);
- if (! rc) {
- *kernel = 0;
- *user = 0;
- return FALSE;
- }
- *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
- *user = (CHUNK_SIZE_T) (user64 / 10000);
- return TRUE;
- } else {
- __int64 creation64, exit64, kernel64, user64;
- int rc = GetThreadTimes (GetCurrentThread (),
- (FILETIME *) &creation64,
- (FILETIME *) &exit64,
- (FILETIME *) &kernel64,
- (FILETIME *) &user64);
- if (! rc) {
- *kernel = 0;
- *user = 0;
- return FALSE;
- }
- *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
- *user = (CHUNK_SIZE_T) (user64 / 10000);
- return TRUE;
- }
-}
-
-#endif /* WIN32 */
-
-/* ------------------------------------------------------------
-History:
- V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
- * Fix malloc_state bitmap array misdeclaration
-
- V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
- * Allow tuning of FIRST_SORTED_BIN_SIZE
- * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
- * Better detection and support for non-contiguousness of MORECORE.
- Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
- * Bypass most of malloc if no frees. Thanks To Emery Berger.
-    * Fix freeing of old top non-contiguous chunk in sysmalloc.
- * Raised default trim and map thresholds to 256K.
- * Fix mmap-related #defines. Thanks to Lubos Lunak.
- * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
- * Branch-free bin calculation
- * Default trim and mmap thresholds now 256K.
-
- V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
- * Introduce independent_comalloc and independent_calloc.
- Thanks to Michael Pachos for motivation and help.
- * Make optional .h file available
- * Allow > 2GB requests on 32bit systems.
- * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
- Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
- and Anonymous.
- * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
- helping test this.)
- * memalign: check alignment arg
- * realloc: don't try to shift chunks backwards, since this
- leads to more fragmentation in some programs and doesn't
- seem to help in any others.
- * Collect all cases in malloc requiring system memory into sYSMALLOc
- * Use mmap as backup to sbrk
- * Place all internal state in malloc_state
- * Introduce fastbins (although similar to 2.5.1)
- * Many minor tunings and cosmetic improvements
- * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
- * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
- Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
- * Include errno.h to support default failure action.
-
- V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
- * return null for negative arguments
- * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
- * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
- (e.g. WIN32 platforms)
- * Cleanup header file inclusion for WIN32 platforms
- * Cleanup code to avoid Microsoft Visual C++ compiler complaints
- * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
- memory allocation routines
- * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
- * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
- usage of 'assert' in non-WIN32 code
- * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
- avoid infinite loop
- * Always call 'fREe()' rather than 'free()'
-
- V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
- * Fixed ordering problem with boundary-stamping
-
- V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
- * Added pvalloc, as recommended by H.J. Liu
- * Added 64bit pointer support mainly from Wolfram Gloger
- * Added anonymously donated WIN32 sbrk emulation
- * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
- * malloc_extend_top: fix mask error that caused wastage after
- foreign sbrks
- * Add linux mremap support code from HJ Liu
-
- V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
- * Integrated most documentation with the code.
- * Add support for mmap, with help from
- Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
- * Use last_remainder in more cases.
- * Pack bins using idea from colin@nyx10.cs.du.edu
-    * Use ordered bins instead of best-fit threshold
- * Eliminate block-local decls to simplify tracing and debugging.
- * Support another case of realloc via move into top
-    * Fix error occurring when initial sbrk_base not word-aligned.
- * Rely on page size for units instead of SBRK_UNIT to
- avoid surprises about sbrk alignment conventions.
- * Add mallinfo, mallopt. Thanks to Raymond Nijssen
- (raymond@es.ele.tue.nl) for the suggestion.
- * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
- * More precautions for cases where other routines call sbrk,
- courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
- * Added macros etc., allowing use in linux libc from
- H.J. Lu (hjl@gnu.ai.mit.edu)
- * Inverted this history list
-
- V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
- * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
- * Removed all preallocation code since under current scheme
- the work required to undo bad preallocations exceeds
- the work saved in good cases for most test programs.
- * No longer use return list or unconsolidated bins since
- no scheme using them consistently outperforms those that don't
- given above changes.
- * Use best fit for very large chunks to prevent some worst-cases.
- * Added some support for debugging
-
- V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
- * Removed footers when chunks are in use. Thanks to
- Paul Wilson (wilson@cs.texas.edu) for the suggestion.
-
- V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
- * Added malloc_trim, with help from Wolfram Gloger
- (wmglo@Dent.MED.Uni-Muenchen.DE).
-
- V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
-
- V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
- * realloc: try to expand in both directions
- * malloc: swap order of clean-bin strategy;
- * realloc: only conditionally expand backwards
- * Try not to scavenge used bins
- * Use bin counts as a guide to preallocation
- * Occasionally bin return list chunks in first scan
- * Add a few optimizations from colin@nyx10.cs.du.edu
-
- V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
- * faster bin computation & slightly different binning
- * merged all consolidations to one part of malloc proper
- (eliminating old malloc_find_space & malloc_clean_bin)
- * Scan 2 returns chunks (not just 1)
- * Propagate failure in realloc if malloc returns 0
- * Add stuff to allow compilation on non-ANSI compilers
- from kpv@research.att.com
-
- V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
- * removed potential for odd address access in prev_chunk
- * removed dependency on getpagesize.h
- * misc cosmetics and a bit more internal documentation
- * anticosmetics: mangled names in macros to evade debugger strangeness
- * tested on sparc, hp-700, dec-mips, rs6000
- with gcc & native cc (hp, dec only) allowing
- Detlefs & Zorn comparison study (in SIGPLAN Notices.)
-
- Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
- * Based loosely on libg++-1.2X malloc. (It retains some of the overall
- structure of old version, but most details differ.)
-
-*/
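
For reference, the tuning routines removed above (mALLOPt, mTRIm, mALLINFo) map to the usual public names when USE_DL_PREFIX is not defined.  The following is a minimal sketch of how a client exercises that interface; it is not part of the removed tree and assumes a libc, or a build of this malloc, that exposes mallopt, malloc_trim and mallinfo under those names:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  /* M_MXFAST bounds the chunk sizes served from the fastbins;
     M_TRIM_THRESHOLD controls when top-of-heap trimming kicks in.  */
  mallopt (M_MXFAST, 64);
  mallopt (M_TRIM_THRESHOLD, 256 * 1024);

  void *p = malloc (4096);
  free (p);

  /* Give unused top-of-heap memory back to the system, keeping no pad.  */
  malloc_trim (0);

  /* mallinfo reports the counters mALLINFo fills in above.  */
  struct mallinfo mi = mallinfo ();
  printf ("arena=%d in use=%d free=%d\n", mi.arena, mi.uordblks, mi.fordblks);
  return 0;
}
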
diff --git a/physmem/mmap.c b/physmem/mmap.c
deleted file mode 100644
index c54a7d2..0000000
--- a/physmem/mmap.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/* mmap.c - A simple mmap for anonymous memory allocations in physmem.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <sys/mman.h>
-
-#include "output.h"
-#include "zalloc.h"
-
-
-void *
-mmap (void *address, size_t length, int protect, int flags,
- int filedes, off_t offset)
-{
- if (address)
- panic ("mmap called with non-zero ADDRESS");
- if (flags != (MAP_PRIVATE | MAP_ANONYMOUS))
- panic ("mmap called with invalid flags");
- if (protect != (PROT_READ | PROT_WRITE))
- panic ("mmap called with invalid protection");
-
- /* At this point, we can safely ignore FILEDES and OFFSET. */
- return (((void *) zalloc (length)) ?: (void *) -1);
-}
-
-
-int
-munmap (void *addr, size_t length)
-{
- zfree ((l4_word_t) addr, length);
- return 0;
-}
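
The mmap removed above is deliberately minimal: it panics unless asked for an anonymous, private, read/write mapping with a null address hint, and it ignores the descriptor and offset.  A sketch of the only call shape it accepts (not from the tree; the _DEFAULT_SOURCE define is only needed when building against a host libc that hides MAP_ANONYMOUS):

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <sys/mman.h>

int
main (void)
{
  size_t len = 16 * 4096;
  void *buf = mmap (NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED)
    return 1;

  /* ... use BUF ... */

  munmap (buf, len);
  return 0;
}
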
diff --git a/physmem/output.c b/physmem/output.c
deleted file mode 100644
index e3a058d..0000000
--- a/physmem/output.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/* output.c - Output routines.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdarg.h>
-
-#include <l4.h>
-
-#include <hurd/wortel.h>
-
-#include "output.h"
-
-
-/* True if debugging is enabled. */
-int output_debug;
-
-
-/* Send a shutdown request to the rootserver wortel. */
-void
-__attribute__((__noreturn__))
-shutdown (void)
-{
- wortel_shutdown ();
-
- while (1)
- l4_sleep (L4_NEVER);
-
- /* NOT REACHED. */
-}
-
-
-/* Print the single character CHR on the output device. */
-int
-putchar (int chr)
-{
- wortel_putchar (chr);
- return 0;
-}
-
-
-int
-puts (const char *str)
-{
- while (*str != '\0')
- putchar (*(str++));
-
- putchar ('\n');
-
- return 0;
-}
-
-
-static void
-print_nr (unsigned long long nr, int base)
-{
- static char *digits = "0123456789abcdef";
- char str[30];
- int i = 0;
-
- do
- {
- str[i++] = digits[nr % base];
- nr = nr / base;
- }
- while (nr);
-
- i--;
- while (i >= 0)
- putchar (str[i--]);
-}
-
-
-static void
-print_signed_nr (long long nr, int base)
-{
- unsigned long long unr;
-
- if (nr < 0)
- {
- putchar ('-');
- unr = -nr;
- }
- else
- unr = nr;
-
- print_nr (unr, base);
-}
-
-
-int
-printf (const char *fmt, ...)
-{
- va_list ap;
-
- va_start (ap, fmt);
- const char *p = fmt;
-
- while (*p != '\0')
- {
- if (*p != '%')
- {
- putchar (*(p++));
- continue;
- }
-
- p++;
- switch (*p)
- {
- case '%':
- putchar ('%');
- p++;
- break;
-
- case 'l':
- p++;
- if (*p != 'l')
- {
- putchar ('%');
- putchar ('l');
- putchar (*(p++));
- continue;
- }
- p++;
- switch (*p)
- {
- case 'o':
- print_nr (va_arg (ap, unsigned long long), 8);
- p++;
- break;
-
- case 'd':
- case 'i':
- print_signed_nr (va_arg (ap, long long), 10);
- p++;
- break;
-
- case 'x':
- case 'X':
- print_nr (va_arg (ap, unsigned long long), 16);
- p++;
- break;
-
- case 'u':
- print_nr (va_arg (ap, unsigned long long), 10);
- p++;
- break;
-
- default:
- putchar ('%');
- putchar ('l');
- putchar ('l');
- putchar (*(p++));
- break;
- }
- break;
-
- case 'o':
- print_nr (va_arg (ap, unsigned int), 8);
- p++;
- break;
-
- case 'd':
- case 'i':
- print_signed_nr (va_arg (ap, int), 10);
- p++;
- break;
-
- case 'x':
- case 'X':
- print_nr (va_arg (ap, unsigned int), 16);
- p++;
- break;
-
- case 'u':
- print_nr (va_arg (ap, unsigned int), 10);
- p++;
- break;
-
- case 'c':
- putchar (va_arg (ap, int));
- p++;
- break;
-
- case 's':
- {
- char *str = va_arg (ap, char *);
- while (*str)
- putchar (*(str++));
- }
- p++;
- break;
-
- case 'p':
- print_nr ((unsigned int) va_arg (ap, void *), 16);
- p++;
- break;
-
- default:
- putchar ('%');
- putchar (*p);
- p++;
- break;
- }
- }
-
- return 0;
-}
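
The printf removed above implements only a subset of the standard conversions: %d, %i, %u, %o, %x, %X, %c, %s, %p, %% and the ll length modifier; field widths, precisions and other length modifiers are passed through unrecognized.  A sketch of calls that stay inside that subset (not from the tree; it also compiles against a normal libc):

#include <stdio.h>

int
main (void)
{
  int x = -42;
  unsigned long long big = 0x123456789ULL;
  const char *name = "physmem";

  printf ("%s: x=%d hex=%x big=%llx ptr=%p 100%%\n",
          name, x, (unsigned int) x, big, (void *) &x);
  return 0;
}
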
diff --git a/physmem/output.h b/physmem/output.h
deleted file mode 100644
index baa3b36..0000000
--- a/physmem/output.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* output.h - Output routines interfaces.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#ifndef _OUTPUT_H
-#define _OUTPUT_H 1
-
-
-/* Print the single character CHR on the output device. */
-int putchar (int chr);
-
-int puts (const char *str);
-
-int printf (const char *fmt, ...);
-
-/* This is not an output function, but it is part of the panic()
- macro. */
-void __attribute__((__noreturn__)) shutdown (void);
-
-
-/* The program name. */
-extern char program_name[];
-
-/* True if debug mode is enabled. */
-extern int output_debug;
-
-/* Print a debug message. */
-#define debug(fmt, ...) \
- ({ \
- if (output_debug) \
- printf ("%s:%s: " fmt, program_name, \
- __FUNCTION__, ##__VA_ARGS__); \
- })
-
-/* Print an error message and fail. */
-#define panic(...) \
- ({ \
- printf ("%s: %s: error: ", program_name, __func__); \
- printf (__VA_ARGS__); \
- putchar ('\n'); \
- shutdown (); \
- })
-
-#endif /* _OUTPUT_H */
diff --git a/physmem/physmem.c b/physmem/physmem.c
deleted file mode 100644
index 0c824a2..0000000
--- a/physmem/physmem.c
+++ /dev/null
@@ -1,283 +0,0 @@
-/* Main function for physical memory server.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <pthread.h>
-
-#include <hurd/wortel.h>
-
-#include "priv.h"
-#include "zalloc.h"
-
-
-/* The program name. */
-char program_name[] = "physmem";
-
-
-/* The following functions are required by pthread. */
-
-void
-__attribute__ ((__noreturn__))
-exit (int __status)
-{
- panic ("exit() called");
-}
-
-
-void
-abort (void)
-{
- panic ("abort() called");
-}
-
-
-/* Initialized in main. */
-l4_thread_id_t wortel_thread_id;
-
-/* FIXME: Hard coded cap ID. */
-wortel_cap_id_t wortel_cap_id = 0;
-
-
-void
-get_all_memory (void)
-{
- l4_fpage_t fpage;
-
- do
- {
- fpage = wortel_get_mem ();
-
- if (fpage != L4_NILPAGE)
- zfree (l4_address (fpage), l4_size (fpage));
- }
- while (fpage != L4_NILPAGE);
-}
-
-
-void
-create_bootstrap_caps (hurd_cap_bucket_t bucket)
-{
- error_t err;
- hurd_cap_handle_t cap;
- struct container *container;
- hurd_cap_obj_t obj;
-
- l4_accept (l4_map_grant_items (L4_COMPLETE_ADDRESS_SPACE));
-
- while (1)
- {
- hurd_task_id_t task_id;
- unsigned int nr_fpages;
- l4_fpage_t fpages[L4_NUM_MRS / 2];
-
- task_id = wortel_get_cap_request (&nr_fpages, fpages);
-
- if (nr_fpages == 0)
- {
- /* This requests the master control capability. */
-
- /* FIXME: Create capability. */
- /* FIXME: Use our control cap for this task here. */
- wortel_get_cap_reply (0xf00);
-
- /* This is the last request made. */
- return;
- }
- else
- {
- debug ("Creating cap for 0x%x:", task_id);
-
- /* Create memory container for the provided grant items. */
- if (nr_fpages == 1 && fpages[0] == L4_NILPAGE)
- {
- /* FIXME: Create control capability for this one
- task. */
- debug ("%s", "Can't create task control capability yet");
- }
- else
- {
- err = container_alloc (nr_fpages, fpages, &container);
- if (err)
- panic ("container_alloc: %i\n", err);
-
- obj = hurd_cap_obj_from_user (struct container *, container);
- hurd_cap_obj_unlock (obj);
-
- err = hurd_cap_bucket_inject (bucket, obj, task_id, &cap);
- if (err)
- panic ("hurd_cap_bucket_inject: %i\n", err);
-
- hurd_cap_obj_lock (obj);
- hurd_cap_obj_drop (obj);
-
- debug (" 0x%x\n", cap);
-
- /* Return CAP. */
- wortel_get_cap_reply (cap);
- }
- }
- }
-}
-
-
-/* Initialize the thread support, and return the L4 thread ID to be
- used for the server thread. */
-static l4_thread_id_t
-setup_threads (void)
-{
- int err;
- pthread_t thread;
- l4_thread_id_t server_thread;
- l4_thread_id_t main_thread;
- l4_word_t extra_threads;
-
- extra_threads = wortel_get_threads ();
- if (extra_threads < 3)
- panic ("at least three extra threads required for physmem");
-
- /* Use the first extra thread as main thread. */
- main_thread = l4_global_id (l4_thread_no (l4_my_global_id ()) + 1,
- l4_version (l4_my_global_id ()));
- server_thread = l4_my_global_id ();
-
- /* Switch threads. We still need the current main thread as the
- server thread. */
- l4_set_pager_of (main_thread, l4_pager ());
- switch_thread (server_thread, main_thread);
-
- /* Create the main thread. */
- err = pthread_create (&thread, 0, 0, 0);
-
- if (err)
- panic ("could not create main thread: %i\n", err);
-
- /* Now add the remaining extra threads to the pool. */
- while (--extra_threads > 0)
- {
- l4_thread_id_t tid;
- tid = l4_global_id (l4_thread_no (l4_my_global_id ()) + extra_threads,
- l4_version (l4_my_global_id ()));
- pthread_pool_add_np (tid);
- }
-
- return server_thread;
-}
-
-
-/* FIXME: Should be elsewhere. Needed by libhurd-slab. */
-int
-getpagesize ()
-{
- return l4_min_page_size ();
-}
-
-
-void *
-physmem_server (void *arg)
-{
- hurd_cap_bucket_t bucket = (hurd_cap_bucket_t) arg;
- error_t err;
-
-  /* The physical memory server can run out of threads at a time when
-     the task server runs out of memory.  To avoid a deadlock, we
- allocate worker threads asynchronously. */
- hurd_cap_bucket_worker_alloc (bucket, true);
-
- /* No root object is provided by the physmem server. */
- /* FIXME: Use a worker timeout (there is no issue: even if they
- timeout before the task server is up and running, the threads
- will be cached in pthread and are still available for
- allocation). */
- err = hurd_cap_bucket_manage_mt (bucket, NULL, 0, 0);
- if (err)
- debug ("bucket_manage_mt failed: %i\n", err);
-
- panic ("bucket_manage_mt returned!");
-}
-
-
-static void
-bootstrap_final (void)
-{
- l4_thread_id_t task_server;
- hurd_cap_handle_t task_cap;
- l4_thread_id_t deva_server;
- hurd_cap_handle_t deva_cap;
-
- wortel_bootstrap_final (&task_server, &task_cap, &deva_server, &deva_cap);
-
- /* FIXME: Do something with the task cap. */
-}
-
-
-int
-main (int argc, char *argv[])
-{
- error_t err;
- l4_thread_id_t server_thread;
- hurd_cap_bucket_t bucket;
- pthread_t manager;
-
- /* FIXME: Hard coded thread ID. */
- wortel_thread_id = l4_global_id (l4_thread_user_base () + 2, 1);
-
- output_debug = 1;
-
- debug ("%s " PACKAGE_VERSION "\n", program_name);
-
- get_all_memory ();
-
- server_thread = setup_threads ();
-
- err = container_class_init ();
- if (err)
- panic ("container_class_init: %i\n", err);
-
- err = hurd_cap_bucket_create (&bucket);
- if (err)
- panic ("bucket_create: %i\n", err);
-
- frame_entry_init ();
- frame_init ();
-
- create_bootstrap_caps (bucket);
-
- /* Create the server thread and start serving RPC requests. */
- err = pthread_create_from_l4_tid_np (&manager, NULL, server_thread,
- physmem_server, bucket);
- if (err)
- panic ("pthread_create_from_l4_tid_np: %i\n", err);
- pthread_detach (manager);
-
- bootstrap_final ();
-
- /* FIXME: Eventually, add shutdown support on wortels(?)
- request. */
- while (1)
- l4_sleep (L4_NEVER);
-
- return 0;
-}
diff --git a/physmem/physmem.h b/physmem/physmem.h
deleted file mode 100644
index 2747783..0000000
--- a/physmem/physmem.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* physmem.h - Interfaces exported by physmem.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Neal H. Walfield <neal@gnu.org>.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with the GNU Hurd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA. */
-
-#ifndef HURD_PHYSMEM_H
-#define HURD_PHYSMEM_H
-
-/* Execute permission. */
-#define HURD_PM_CONT_EXECUTE (1 << 0)
-/* Write permission. */
-#define HURD_PM_CONT_WRITE (1 << 1)
-/* Read permission. */
-#define HURD_PM_CONT_READ (1 << 2)
-/* Read and write permission. */
-#define HURD_PM_CONT_RW (HURD_PM_CONT_READ|HURD_PM_CONT_WRITE)
-/* Read, write and execute. */
-#define HURD_PM_CONT_RWX (HURD_PM_CONT_RW|HURD_PM_CONT_EXECUTE)
-
-/* Don't copy on write (COW), simply share (a la SYSV SHM). */
-#define HURD_PM_CONT_COPY_SHARED (1 << 3)
-/* Don't copy the region, move it. */
-#define HURD_PM_CONT_COPY_MOVE (1 << 4)
-
-/* Either completely fail or completely succeed: don't partially
- succeed. */
-#define HURD_PM_CONT_ALL_OR_NONE (1 << 8)
-
-/* Do not fail if the specified identifier is already in use.
- Instead, deallocate the current frame and allocate a new one in its
- place. This is useful only to shortcut explicit deallocation
- requests; using it to avoid EEXIST error messages will lead to
- problems as it suggests that the client is not keep track of
- frames. */
-#define HURD_PM_CONT_ALLOC_SQUASH (1 << 9)
-/* Allocate extra frames if needed. */
-#define HURD_PM_CONT_ALLOC_EXTRA (1 << 10)
-/* Only allocate frames suitable for DMA. */
-#define HURD_PM_CONT_ALLOC_DMA (1 << 11)
-
-/* RPC Identifiers. */
-enum
- {
- hurd_pm_container_create_id = 130,
- hurd_pm_container_share_id,
- hurd_pm_container_allocate_id,
- hurd_pm_container_deallocate_id,
- hurd_pm_container_map_id,
- hurd_pm_container_copy_id
- };
-
-#include <hurd/types.h>
-
-/* Memory control object. */
-typedef hurd_cap_handle_t hurd_pm_control_t;
-
-/* Container. */
-typedef hurd_cap_handle_t hurd_pm_container_t;
-
-#endif /* HURD_PHYSMEM_H */
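
The permission and allocation bits defined above compose by OR.  A sketch of building the flag word a client would presumably pass with one of the container RPCs listed above, requesting read/write, DMA-capable memory with all-or-nothing semantics (not from the tree; it assumes this physmem.h and the <hurd/types.h> it includes are on the include path):

#include <stdio.h>
#include "physmem.h"

int
main (void)
{
  unsigned int flags = HURD_PM_CONT_RW
                       | HURD_PM_CONT_ALLOC_DMA
                       | HURD_PM_CONT_ALL_OR_NONE;

  printf ("allocate flags: 0x%x (rpc id %d)\n",
          flags, hurd_pm_container_allocate_id);
  return 0;
}
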
diff --git a/physmem/priv.h b/physmem/priv.h
deleted file mode 100644
index d86357b..0000000
--- a/physmem/priv.h
+++ /dev/null
@@ -1,363 +0,0 @@
-/* physmem.c - Generic definitions.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
- Written by Neal H. Walfield <neal@gnu.org>.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with the GNU Hurd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA. */
-
-#include <errno.h>
-#include <l4.h>
-#include <hurd/btree.h>
-#include <hurd/cap-server.h>
-
-#include <compiler.h>
-
-#include "output.h"
-
-
-/* The program name. */
-extern char program_name[];
-
-#define BUG_ADDRESS "<bug-hurd@gnu.org>"
-
-int main (int argc, char *argv[]);
-
-
-/* Extract the L4 access rights from FLAGS. */
-static inline l4_word_t
-extract_access (l4_word_t flags)
-{
- return flags & L4_FPAGE_FULLY_ACCESSIBLE;
-}
-
-/* The following function must be defined by the architecture
- dependent code. */
-
-/* Switch execution transparently to thread TO. The thread FROM,
- which must be the current thread, will be halted. */
-void switch_thread (l4_thread_id_t from, l4_thread_id_t to);
-
-
-/* Return true if INDEX lies within (START, START+SIZE-1)
- inclusive. */
-static bool
-within (l4_word_t index, l4_word_t start, l4_word_t size)
-{
- return index >= start && index < start + size;
-}
-
-/* Return true if (INDEX1, INDEX1+SIZE1-1) inclusive overlaps with
- (INDEX2, INDEX2+SIZE2-1) inclusive. */
-static bool
-overlap (l4_word_t index1, l4_word_t size1, l4_word_t index2, l4_word_t size2)
-{
- return
- /* Is the start of the first region within the second?
- 2 1 2 1
- or 2 1 1 2 */
- within (index1, index2, size2)
- /* Is the end of the first region within the second?
- 1 2 1 2
- or 2 1 1 2 */
- || within (index1 + size1 - 1, index2, size2)
- /* Is start of the second region within the first?
- 1 2 1 2
- or 1 2 2 1 */
- || within (index2, index1, size1);
-
- /* We have implicitly checked if the end of the second region is
- within the first (i.e. within (index2 + size2 - 1, index1, size1))
- 2 1 2 1
- or 1 2 2 1
- in check 1 and check 3. */
-}
-
-/* A region of memory. */
-struct region
-{
- /* Start of the region. */
- uintptr_t start;
- /* And its extent. */
- size_t size;
-};
-
-static inline int
-region_compare (const struct region *a, const struct region *b)
-{
- if (overlap (a->start, a->size, b->start, b->size))
- return 0;
- else
- return a->start - b->start;
-}
-
-/* Forward. */
-struct frame_entry;
-
-/* A frame refers directly to physical memory.  Exactly one frame
- structure refers to each piece of allocated (to users, i.e. not
- internal) physical memory. */
-struct frame
-{
- /* Lock for all members as well as all frame entries using this
- frame. */
- pthread_mutex_t lock;
-
- /* One reference per frame entry plus any active users. */
- int refs;
-
- /* The physical memory allocated to this frame. This is allocated
- lazily. If the address portion is 0, memory has not yet been
- allocated. */
- l4_fpage_t memory;
-
- /* The types of mappings which have been made since the last time
- this frame was unmapped. This does not mean that it actually is
- mapped as users can unmap it themselves. */
- l4_word_t may_be_mapped;
-
- /* Number of extant copy on writes. */
- int cow;
-
- /* List of frame entries referring to this frame. */
- struct frame_entry *frame_entries;
-};
-
-/* Regions in containers refer to physical memory. Multiple regions
-   may refer to the same physical memory (thereby allowing sharing and
- COW). Every region has its own frame entry which contains the
- per-region state. FRAME refers to the physical memory. */
-struct frame_entry
-{
- /* The following fields are locked by the containing container's
- lock. */
-
- /* The container of which this frame entry is a part. */
- struct container *container;
- /* The name of this region within the containing container. */
- struct region region;
- hurd_btree_node_t node;
-
-  /* The following fields are locked by FRAME->lock.  */
-
- /* The physical memory backing this region. */
- struct frame *frame;
-
- /* The frame entry may not reference all of the physical memory in
- FRAME (due to partial sharing, etc). This is the offset to the
- start of the memory which this frame entry uses. */
- size_t frame_offset;
-
- /* The list entry for FRAME's list of frame entries referring to
- itself. */
- struct frame_entry *next;
- struct frame_entry **prevp;
-
- /* A circular list of frame entries which share a copy of the
- frame. */
- struct frame_entry *shared_next;
- struct frame_entry **shared_prevp;
-};
-
-BTREE_CLASS(frame_entry, struct frame_entry, struct region, region,
- node, region_compare)
-
-struct container
-{
- pthread_mutex_t lock;
-  /* List of allocated frames in this container.  */
- hurd_btree_frame_entry_t frame_entries;
-};
-
-/* Initialize the frame-entry subsystem.  */
-extern void frame_entry_init (void);
-
-/* Allocate an uninitialized frame entry structure. Return NULL if
- there is insufficient memory. */
-extern struct frame_entry *frame_entry_alloc (void);
-
-/* Deallocate frame entry FRAME_ENTRY. NB: this function does not
- deinitialize any resources FRAME_ENTRY may still reference. It is
- the dual of frame_entry_alloc. */
-extern void frame_entry_free (struct frame_entry *frame_entry);
-
-/* Initialize the previously uninitialized frame entry structure
- FRAME_ENTRY to cover the region starting at byte START and
- extending SIZE bytes on container CONT. SIZE must be a power of 2.
-   CONT must be locked.  Physical memory is reserved; however, it is
- not allocated until a frame is attached and that frame is bound
- using frame_memory_bind. FRAME_ENTRY->FRAME is locked.
-
- If the specified region overlaps with any in the container, EEXIST
- is returned. */
-extern error_t frame_entry_create (struct container *cont,
- struct frame_entry *frame_entry,
- uintptr_t start, size_t size);
-
-/* Initialize the previously uninitialized frame entry structure
- FRAME_ENTRY to cover the region starting at byte START and
- extending SIZE bytes in container CONT. FRAME_ENTRY refers to the
- physical memory in SOURCE starting at offset FRAME_OFFSET relative
- to the base of SOURCE->FRAME. If SHARED_MEMORY is true, the
-   physical memory is shared; otherwise, a copy is marked COW.  SIZE
- must be a power of 2. FRAME_OFFSET must be a multiple of SIZE.
- CONT must be locked. FRAME must be locked. A reference is added
- to FRAME.
-
- If the specified region overlaps with any in the container, EEXIST
- is returned. */
-extern error_t frame_entry_copy (struct container *cont,
- struct frame_entry *frame_entry,
- uintptr_t start, size_t size,
- struct frame_entry *source,
- size_t frame_offset,
- bool shared_memory);
-
-/* Initialize the previously uninitialized frame entry structure
- FRAME_ENTRY to cover the region starting at byte START and
- extending SIZE bytes in container CONT. FRAME_ENTRY refers to the
- physical memory in SOURCE starting at offset FRAME_OFFSET relative
- to the base of SOURCE->FRAME. If SHARED_MEMORY is true, the
-   physical memory is shared; otherwise, a copy is marked COW.  SIZE
- must be a power of 2. FRAME_OFFSET must be a multiple of SIZE.
- CONT must be locked. FRAME must be locked. A reference is added
- to FRAME.
-
- If the specified region overlaps with any in the container, EEXIST
- is returned. */
-extern error_t frame_entry_use (struct container *cont,
- struct frame_entry *frame_entry,
- uintptr_t start, size_t size,
- struct frame *frame,
- size_t frame_offset);
-
-/* Deinitialize frame entry FRAME_ENTRY. If CONT is NULL, FRAME_ENTRY
- has already been detached from any container. Otherwise,
-   FRAME_ENTRY is detached from the locked container CONT.  Drops a
-   reference to the underlying frame.  FRAME_ENTRY->FRAME->LOCK must be
-   held and is unlocked if DO_UNLOCK_FRAME is true; otherwise it remains
- locked on return (in which case the caller must still have a
- reference to FRAME_ENTRY->FRAME). This does *not* deallocate
- FRAME_ENTRY which must still be done by calling
- frame_entry_free. */
-extern void frame_entry_destroy (struct container *cont,
- struct frame_entry *frame_entry,
- bool do_unlock_frame);
-
-/* Find a frame entry in container CONT which overlaps with the region
- START+SIZE and return it. Returns NULL if no frame entry in CONT
- overlaps with the provided region. CONT must be locked. */
-extern struct frame_entry *frame_entry_find (struct container *cont,
- uintptr_t start,
- size_t size);
-
-/* Append map items to the message MSG with access ACCESS for the LEN
-   bytes corresponding to the memory underlying FRAME_ENTRY
-   starting at byte START (relative to the base of FRAME_ENTRY) to be
-   mapped at virtual memory address VADDR.  If AMOUNT is not NULL, the
-   number of bytes for which map items could be created is placed in
-   *AMOUNT.  If there is not enough space for map items to cover all LEN
- bytes, ENOSPC is returned. */
-extern error_t frame_entry_map (struct frame_entry *frame_entry,
- size_t start, size_t len, int access,
- uintptr_t vaddr, l4_msg_t msg,
- size_t *amount);
-
-/* Deallocate part (or all) of FE which is in container CONT.
- CONT_START is the start of the region to deallocate in terms of the
- container which FE must cover. LENGTH is the number of bytes to
-   deallocate, all of which FE must cover.  CONT must be locked.
-   FE->FRAME must be locked.  FE->FRAME is implicitly unlocked on
- return. */
-extern error_t frame_entry_deallocate (struct container *cont,
- struct frame_entry *fe,
- uintptr_t cont_start,
- size_t length);
-
-/* Initialize the frame subsystem. */
-extern void frame_init (void);
-
-/* Allocate a frame structure holding SIZE bytes. Physical memory
- must have already been reserved (by, e.g. a prior frame_entry_alloc
- call). Allocation of the physical memory is deferred until
- frame_memory_alloc is called. The returned frame has a single
- reference and is locked. */
-extern struct frame *frame_alloc (size_t size);
-
-/* Bind frame FRAME to physical memory if not already done. */
-static inline void
-frame_memory_bind (struct frame *frame)
-{
- /* Actually allocates the physical memory. */
- extern void frame_memory_alloc (struct frame *frame);
-
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
- if (! l4_address (frame->memory))
- frame_memory_alloc (frame);
-}
-
-/* Add a reference to frame FRAME. */
-static inline void
-frame_ref (struct frame *frame)
-{
- frame->refs ++;
-}
-
-/* Release a reference to frame FRAME. FRAME must be locked. When
-   the last reference is removed, any extant client mappings will be
- unmapped, any physical memory will be deallocated and FRAME will be
- freed. */
-extern void frame_deref (struct frame *frame);
-
-/* Release a reference to frame FRAME. FRAME must be locked. The
- caller must hold at least one reference in addition to the one it
- wants to release. FRAME is not unlocked. */
-static inline void
-frame_release (struct frame *frame)
-{
- assert (pthread_mutex_trylock (&frame->lock) == EBUSY);
- assert (frame->refs > 1);
- frame->refs --;
-}
-
-/* Add FRAME_ENTRY as a user of FRAME. FRAME_ENTRY must hold a
- reference to FRAME as long as it uses it. (This function does not
- allocate a reference.) FRAME must be locked. */
-extern void frame_add_user (struct frame *frame,
- struct frame_entry *frame_entry);
-
-/* Remove FRAME_ENTRY as a user of FRAME. */
-extern void frame_drop_user (struct frame *frame,
- struct frame_entry *frame_entry);
-
-/* Attach frame entry FRAME_ENTRY to container CONT. FRAME_ENTRY must
- not currently be part of any container. CONT must be locked.
- Returns EEXIST if FRAME_ENTRY overlaps with a frame entry in
- CONT. */
-extern error_t container_attach (struct container *cont,
- struct frame_entry *frame_entry);
-
-/* Detach frame entry FRAME_ENTRY from container CONT. CONT must be
- locked. After returning, FRAME_ENTRY->CONTAINER must no longer be
- used. */
-extern void container_detach (struct container *cont,
- struct frame_entry *frame_entry);
-
-/* Allocate a new container object covering the NR_FPAGES fpages
- listed in FPAGES. The object returned is locked and has one
- reference. */
-extern error_t container_alloc (l4_word_t nr_fpages, l4_word_t *fpages,
- struct container **r_container);
diff --git a/physmem/zalloc.c b/physmem/zalloc.c
deleted file mode 100644
index 93abfd3..0000000
--- a/physmem/zalloc.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/* Zone allocator for physical memory server.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Neal H Walfield.
- Modified by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <assert.h>
-#include <string.h>
-
-#include "output.h"
-
-#include "zalloc.h"
-
-/* Zalloc: A fast zone allocator. This is not a general purpose
- allocator. If you attempt to use it as such, you will find that it
- is very inefficient. It is, however, designed to be very fast and
- to be used as a base for building a more general purpose allocator.
-
- Memory is kept in zones. Zones are of sizes 2 ** N and all memory
- is aligned on a similar boundary. Typically, the smallest zone
- will be the system page size. Memory of any size can be added to
- the pool as long as it is a multiple of the smallest zone: it is
- broken up as necessary.
-
- Memory can be added to the pool by calling the zfree function with
- the address of the buffer and its size. The buffer is broken up as
- a function of its alignment and size using the buddy system (as
- described by e.g. Knuth). Consider the following: zfree (4k, 16k).
- This says that a buffer of size 16k starting at address 4k should
- be added to the system. Although the size of the buffer is a power
- of 2 (2 ** 14 = 16k), it cannot be added to the 16k zone: it has
- the wrong alignment. Instead, the initial 4k are broken off, added
- to the 4k zone, the next 8k to the 8k zone and the final 4k to the
- 4k zone. If, as memory is added to a zone, its buddy is present,
- the two buffers are buddied up and promoted to the next zone. For
- instance, if the 4k buffer at address 20k was present during the
-   previous zfree, the buffer at 16k would have been combined with this
- and the new larger buffer would have been added to the 8k zone.
-
- When allocating memory, the smallest zone that is larger than or
- equal to the desired size is selected. If the zone is exhausted,
- the allocator will look in the next larger zone and break up a
- buffer to satisfy the request. This continues recursively if
- necessary. If the desired size is smaller than the buffer that is
- selected, the difference is returned to the system. For instance,
- if an allocation request of 12k is made, the system will start
- looking in the 16k zone. If it finds that that zone is exhausted,
- it will select a buffer from the 32k zone and place the top half in
- the 16k zone and use the lower half for the allocation. However,
- as this is 4k too much, the extra is returned to the 4k zone.
-
- When making allocations, the system will not look for adjacent
- memory blocks: if an allocation request of e.g. 8k is issued and
- there is no memory in the 8k zones and above, the 4k zone will not
-   be searched for false buddies.  That is, if in the 4k zone there
-   are buffers starting at 4k and 8k, the allocator will make no effort
- to search for them. Note that they could not have been combined
- during the zfree as 4k's buddy is at 0k and 8k's buddy is at
- 12k. */
-
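
As a concrete illustration of the splitting rule described above, the following standalone sketch (an editor's addition, not part of the removed file) reproduces the zfree (4k, 16k) walk-through: each chunk goes to the largest zone permitted by both its alignment and the remaining size.  A 4 KiB minimum page and the GCC bit-scan builtins are assumed.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* Assumed 4 KiB minimum page.  */

/* Zero-based index of the lowest/highest set bit; these stand in for
   l4_lsb () - 1 and l4_msb () - 1 in the real code.  */
static unsigned lsb (uintptr_t x) { return __builtin_ctzl (x); }
static unsigned msb (uintptr_t x) { return 8 * sizeof x - 1 - __builtin_clzl (x); }

int
main (void)
{
  uintptr_t block = 4 * 1024;    /* zfree (4k, 16k) from the example above.  */
  uintptr_t size = 16 * 1024;

  while (size > 0)
    {
      unsigned align = lsb (block);
      unsigned fit = msb (size);
      unsigned zone = (align < fit ? align : fit) - PAGE_SHIFT;
      uintptr_t chunk = (uintptr_t) 1 << (zone + PAGE_SHIFT);

      printf ("0x%lx..0x%lx -> %lu KiB zone\n", (unsigned long) block,
              (unsigned long) (block + chunk), (unsigned long) (chunk >> 10));

      block += chunk;
      size -= chunk;
    }
  /* Prints the 4 KiB piece at 4k, the 8 KiB piece at 8k, and the final
     4 KiB piece at 16k, matching the description above.  */
  return 0;
}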
-
-/* A free block list ordered by address. Blocks are of size 2 ** N
-   and aligned on a similar boundary.  Since the contents of a block
-   do not matter (it is free), the block itself contains this
- structure at its start address. */
-struct block
-{
- struct block *next;
- struct block *prev;
-};
-
-
-/* Given a zone, return its size. */
-#define ZONE_SIZE(x) (1 << ((x) + L4_MIN_PAGE_SIZE_LOG2))
-
-/* Number of zones in the system. */
-#define ZONES (sizeof (L4_Word_t) * 8 - L4_MIN_PAGE_SIZE_LOG2)
-
-/* The zones. */
-static struct block *zone[ZONES] = { 0, };
-
-
-/* Add the block BLOCK to the zone ZONE_NR. The block has the
- right size and alignment. Buddy up if possible. */
-static inline void
-add_block (struct block *block, unsigned int zone_nr)
-{
- while (1)
- {
- struct block *left = 0;
- struct block *right = zone[zone_nr];
-
- /* Find the left and right neighbours of BLOCK. */
- while (right && block > right)
- {
- left = right;
- right = right->next;
- }
-
- if (left && (((l4_word_t) left) ^ ((l4_word_t) block))
- == ZONE_SIZE (zone_nr))
- {
- /* Buddy on the left. */
-
- /* Remove left neighbour. */
- if (left->prev)
- left->prev->next = left->next;
- else
- zone[zone_nr] = left->next;
- if (left->next)
- left->next->prev = left->prev;
-
- block = left;
- zone_nr++;
- }
- else if (right && (((l4_word_t) right) ^ ((l4_word_t) block))
- == ZONE_SIZE (zone_nr))
- {
- /* Buddy on the right. */
-
- /* Remove right neighbour from the list. */
- if (right->prev)
- right->prev->next = right->next;
- else
- zone[zone_nr] = right->next;
- if (right->next)
- right->next->prev = right->prev;
-
- zone_nr++;
- }
- else
- {
- /* Could not coalesce. Just insert. */
-
- block->next = right;
- if (block->next)
- block->next->prev = block;
-
- block->prev = left;
- if (block->prev)
- block->prev->next = block;
- else
- zone[zone_nr] = block;
-
- /* This is the terminating case. */
- break;
- }
- }
-}
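
A short editor's note on the buddy test used in add_block: two blocks of a given zone are buddies exactly when their addresses differ in the single bit equal to the zone size, so the check reduces to an XOR.  The assertions below (illustrative only, assuming a 4 KiB zone) show why an adjacent block is not necessarily a buddy.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uintptr_t zone_size = 0x1000;       /* A 4 KiB zone.  */
  uintptr_t a = 0x4000;               /* Aligned block in that zone.  */
  uintptr_t b = 0x5000;               /* A's buddy: differs only in the 4k bit.  */
  uintptr_t c = 0x3000;               /* Adjacent to A, but its buddy is 0x2000.  */

  assert ((a ^ b) == zone_size);      /* A and B may be coalesced to 8 KiB.  */
  assert ((a ^ c) != zone_size);      /* A and C may not.  */
  return 0;
}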
-
-
-/* Add the block BLOCK of size SIZE to the pool. BLOCK must be
- aligned to the system's minimum page size. SIZE must be a multiple
- of the system's minimum page size. */
-void
-zfree (l4_word_t block, l4_word_t size)
-{
- l4_word_t min_page_size = l4_min_page_size ();
-
- // debug ("freeing block 0x%x - 0x%x\n", block, block + size);
-
- if (size & (min_page_size - 1))
- panic ("%s: size 0x%x of freed block 0x%x is not a multiple of "
- "minimum page size", __func__, size, block);
-
- if (block & (min_page_size - 1))
- panic ("%s: freed block 0x%x of size 0x%x is not aligned to "
- "minimum page size", __func__, block, size);
-
- do
- {
- /* All blocks must be stored aligned to their size. */
- unsigned int block_align = l4_lsb (block) - 1;
- unsigned int size_align = l4_msb (size) - 1;
- unsigned int zone_nr = (block_align < size_align
- ? block_align : size_align)
- - L4_MIN_PAGE_SIZE_LOG2;
-
- add_block ((struct block *) block, zone_nr);
-
- block += ZONE_SIZE (zone_nr);
- size -= ZONE_SIZE (zone_nr);
- }
- while (size > 0);
-}
-
-
-/* Allocate a block of memory of size SIZE and return its address.
- SIZE must be a multiple of the system's minimum page size. If no
- block of the required size could be allocated, return 0. */
-l4_word_t
-zalloc (l4_word_t size)
-{
- l4_word_t min_page_size = l4_min_page_size ();
- unsigned int zone_nr;
- struct block *block;
-
- // debug ("request for 0x%x bytes\n", size);
-
- if (size & (min_page_size - 1))
- panic ("%s: requested size 0x%x is not a multiple of "
- "minimum page size", __func__, size);
-
- /* Calculate the logarithm to base two of SIZE rounded up to the
- nearest power of two (actually, the MSB function returns one more
- than the logarithm to base two of its argument, rounded down to
- the nearest power of two - this is the same except for the border
- case where only one bit is set. To adjust for this border case,
- we subtract one from the argument to the MSB function). Calculate
- the zone number by subtracting page shift. */
- zone_nr = l4_msb (size - 1) - L4_MIN_PAGE_SIZE_LOG2;
-
- /* Find the smallest zone which fits the request and has memory
- available. */
-  while (zone_nr < ZONES && !zone[zone_nr])
- zone_nr++;
-
- if (zone_nr == ZONES)
- return 0;
-
- /* Found a zone. Now bite off the beginning of the first block in
- this zone. */
- block = zone[zone_nr];
-
- zone[zone_nr] = block->next;
- if (zone[zone_nr])
- zone[zone_nr]->prev = 0;
-
- /* And donate back the remainder of this block, if any. */
- if (ZONE_SIZE (zone_nr) > size)
- zfree (((l4_word_t) block) + size, ZONE_SIZE (zone_nr) - size);
-
- /* Zero out the newly allocated block. */
- memset (block, 0, size);
-
- return (l4_word_t) block;
-}
-
-
-/* Dump the internal data structures. */
-#ifndef NDEBUG
-void
-zalloc_dump_zones (const char *prefix)
-{
- l4_word_t min_page_size = l4_min_page_size ();
- int i;
- struct block *block;
- l4_word_t available = 0;
- int print_empty = 0;
-
- for (i = ZONES - 1; ZONE_SIZE (i) >= min_page_size; i--)
- if (zone[i] || print_empty)
- {
- print_empty = 1;
- printf ("%s: 0x%x: { ", prefix, ZONE_SIZE (i));
- for (block = zone[i]; block; block = block->next)
- {
- available += ZONE_SIZE (i);
- printf ("%p%s", block, (block->next ? ", " : " "));
- }
- printf ("}\n");
- }
-
- printf ("%s: %llu (0x%llx) bytes available\n", prefix,
- (unsigned long long) available, (unsigned long long) available);
-}
-#endif
diff --git a/physmem/zalloc.h b/physmem/zalloc.h
deleted file mode 100644
index caea4a4..0000000
--- a/physmem/zalloc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Zone allocator for physical memory server.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Neal H Walfield.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef __ZALLOC_H__
-#define __ZALLOC_H__
-
-#include <l4.h>
-
-/* Add to the pool the block BLOCK of size SIZE. BLOCK must be
- aligned to the system's minimum page size. SIZE must be a multiple
- of the system's minimum page size. */
-void zfree (l4_word_t block, l4_word_t size);
-
-/* Allocate a block of memory of size SIZE. SIZE must be a multiple
- of the system's minimum page size. */
-l4_word_t zalloc (l4_word_t size);
-
-/* Dump some internal data structures. Only defined if zalloc was
- compiled without NDEBUG defined. */
-void zalloc_dump_zones (const char *prefix);
-
-#endif /* __ZALLOC_H__ */
diff --git a/task/ChangeLog b/task/ChangeLog
deleted file mode 100644
index 5f515f3..0000000
--- a/task/ChangeLog
+++ /dev/null
@@ -1,142 +0,0 @@
-2005-04-06 Neal H. Walfield <neal@gnu.org>
-
- * mmap.c: Include <hurd/anonymous.h>.
- (mmap): Call hurd_anonymous_allocate, not hurd_vm_allocate.
- (munmap): Call hurd_vm_release, not hurd_vm_deallocate.
-
-2005-01-11 Neal H. Walfield <neal@gnu.org>
-
- * Makefile.am (task_SOURCES): Remove physmem-user.h and
- physmem-user.c.
- (task_LDADD): Add ../libhurd-btree/libhurd-btree.a and
- ../libhurd-mm/libhurd-mm.a.
- * ia32-cmain.c: Include <hurd/mm.h>.
- (pager_tid): New global variable.
- (cmain): Allocate a thread that the memory management subsystem
- can use as the pager thread. Call hurd_mm_init. Set the
- PAGER_TID as the current thread's pager.
- * mmap.c: Rewrite to use new interfaces.
- * physmem-user.h: Remove obsolete file.
- * physmem-user.c: Likewise.
-
-2005-01-07 Neal H. Walfield <neal@gnu.org>
-
- * output.h (debug): Preface __VA_ARGS__ with ## thereby making it
- optional.
-
-2005-01-07 Neal H. Walfield <neal@gnu.org>
-
- * thread.c (threads): Supply the allocate_buffer and
- deallocate_buffer arguments to HURD_SLAB_SPACE_INITIALIZER to
- conform with the new semantics.
-
-2004-12-01 Neal H. Walfield <neal@gnu.org>
-
- * physmem-user.h (physmem_map): Change CONT from a hurd_cap_id_t
- to a hurd_cap_handle_t.
- * physmem-user.c (physmem_map): Likewise.
-
-2004-11-17 Neal H. Walfield <neal@gnu.org>
-
- * Makefile.am (bootdir): New variable.
- (boot_PROGRAMS): Use this instead of noinst_PROGRAMS.
-
-2004-11-17 Neal H. Walfield <neal@gnu.org>
-
- * output.h (debug): Include program_name and __FUNCTION__ in
- output.
-
-2004-11-02 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (task_SOURCES): Add thread.c.
- * thread.c: New file.
- * task.h (struct thread): New structure.
- (thread_t): New typedef.
- (thread_set_range, thread_alloc_with_id, thread_alloc,
- thread_dealloc): New prototype.
- (struct task): Change type of member TASK_ID to hurd_task_id_t.
- Change type of member THREADS to thread_t.
- * task-class.c (task_reinit): Walk what is now the list of
- threads.
- (task_thread_alloc): Implement.
- (task_demuxer): Change msg ID of task_thread_alloc RPC (256 was
- taken by the cap-server implementation).
- (task_alloc): Create the list of threads.
-	* task.c: Remove superfluous newlines from panic messages.
- (first_free_thread_no): Move global variable to ...
- (setup_threads): ... here (as local). Call thread_set_range.
-
-2004-11-01 Marcus Brinkmann <marcus@gnu.org>
-
- * task.h (struct task): Remove member OBJ.
- (task_alloc): Change type of last argument to pointer to task_t.
- (task_id_get_task): Use hurd_cap_obj_from_user.
- * task.c (create_bootstrap_caps): Remove variable STARTUP_CAP.
- Add variable TASK. Use hurd_cap_obj_to_user.
- * task-class.c (task_reinit): Use hurd_cap_obj_to_user instead of
- cast.
- (task_class_init): Use type instead size and alignment.
- (task_alloc): Change type of last argument to pointer to task_t.
- Add new variable OBJ and use it as a temporary placeholder.
-
-2004-10-29 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (task_SOURCES): Add task-id.c.
- * task.h (task_id_to_task_lock, task_id_to_task): New declarations.
- (task_id_get_task): New static inline function.
- (task_id_enter, task_id_add): New prototypes.
- * task.c (create_bootstrap_caps): Enter the new tasks into the
- hash table with task_id_enter.
- * task-id.c: New file.
-
- * ia32-cmain.c (switch_thread): Correct start of small sub stack
- address. Reported by Rian Hunter <hurd@thelaststop.net>.
-
-2004-10-28 Marcus Brinkmann <marcus@gnu.org>
-
- * task.c (bootstrap_final): New function.
- (main): Call bootstrap_final.
-
-2004-10-27 Marcus Brinkmann <marcus@gnu.org>
-
- * Makefile.am (task_SOURCES): Add physmem-user.h, physmem-user.c,
- malloc-wrap.c, mmap.c and task-class.c.
- (EXTRA_task_SOURCES): New target.
-	* physmem-user.h, physmem-user.c, malloc.c, malloc-wrap.c, mmap.c,
- task-class.c: New files.
- * task.h (task_class_init, task_alloc): Add prototypes.
- * task.c: Include <hurd/startup.h> and <hurd/wortel.h>
- (__hurd_startup_data): New declaration.
- (create_bootstrap_caps, get_task_id, setup_threads, task_server):
- New functions.
- (first_free_thread_no): New global variable.
- (main): Call setup_threads and initialize server_thread. Call
- task_class_init. Create task bucket. Create manager thread and
- start it.
-
- * ia32-cmain.c (cmain): Access cap_handle member in startup_data,
- not cap_id.
-
-2004-10-26 Marcus Brinkmann <marcus@gnu.org>
-
- * ia32-crt0.S (__hurd_startup_data): New symbol.
- (_start): Save the first argument on the stack in
- __hurd_startup_data.
- * ia32-cmain.c: Include <hurd/startup.h>.
- (__hurd_startup_data): New prototype.
- (wortel_thread_id, wortel_cap_id): Do not initialize statically.
- (cmain): Initialize wortel_thread_id and wortel_cap_id.
-
-2004-04-26 Marcus Brinkmann <marcus@gnu.org>
-
- * ia32-cmain.c: Include <hurd/wortel.h>.
- (wortel_thread_id, wortel_cap_id): New variables.
- * output.c (shutdown): Include <hurd/wortel.h>.
- (shutdown): Rewritten using wortel interface.
- (putchar): Rewritten using wortel interface.
- * task.c: Remove WORTEL_* macros.
-
-2004-04-15 Marcus Brinkmann <marcus@gnu.org>
-
- * Initial check-in.
-
diff --git a/task/Makefile.am b/task/Makefile.am
deleted file mode 100644
index 56e9eee..0000000
--- a/task/Makefile.am
+++ /dev/null
@@ -1,47 +0,0 @@
-# Makefile.am - Makefile template for the task server.
-# Copyright (C) 2004, 2005 Free Software Foundation, Inc.
-# Written by Marcus Brinkmann.
-#
-# This file is part of the GNU Hurd.
-#
-# The GNU Hurd is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# The GNU Hurd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
-
-if ARCH_IA32
- ARCH_SOURCES = ia32-crt0.S ia32-cmain.c
-endif
-
-bootdir = $(prefix)/boot
-boot_PROGRAMS = task
-
-task_CPPFLAGS = -I$(top_builddir)/include \
- -I$(top_srcdir)/libc-parts $(AM_CPPFLAGS)
-
-task_SOURCES = $(ARCH_SOURCES) \
- output.h output.c \
- mmap.c malloc-wrap.c \
- task.h task.c task-class.c task-id.c thread.c
-
-# Doug Lea's malloc is included by malloc-wrap.c.
-EXTRA_task_SOURCES = malloc.c
-
-task_LDFLAGS = -u_start -e_start -nostdlib
-
-task_LDADD = ../libhurd-cap-server/libhurd-cap-server.a \
- ../libhurd-mm/libhurd-mm.a \
- ../libhurd-slab/libhurd-slab.a \
- ../libpthread/libpthread.a \
- ../libhurd-ihash/libhurd-ihash.a \
- ../libhurd-btree/libhurd-btree.a \
- ../libc-parts/libc-parts.a -lgcc
diff --git a/task/ia32-cmain.c b/task/ia32-cmain.c
deleted file mode 100644
index 54de638..0000000
--- a/task/ia32-cmain.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/* ia32-cmain.c - Startup code for the ia32.
- Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <alloca.h>
-#include <stdint.h>
-
-#include <l4/globals.h>
-#include <l4/init.h>
-#include <l4/stubs.h>
-#include <l4/stubs-init.h>
-
-#include "task.h"
-
-#include <hurd/wortel.h>
-#include <hurd/startup.h>
-#include <hurd/mm.h>
-
-
-/* Initialized by the machine-specific startup-code. */
-extern struct hurd_startup_data *__hurd_startup_data;
-
-
-/* Initialized in cmain. */
-l4_thread_id_t wortel_thread_id;
-wortel_cap_id_t wortel_cap_id;
-
-l4_thread_id_t pager_tid;
-
-
-/* Initialize libl4, setup the argument vector, and pass control over
- to the main function. */
-void
-cmain (void)
-{
- error_t err;
- int argc = 0;
- char **argv = 0;
-
- l4_init ();
- l4_init_stubs ();
-
- wortel_thread_id = __hurd_startup_data->wortel.server;
- wortel_cap_id = __hurd_startup_data->wortel.cap_handle;
-
- pager_tid = l4_global_id (l4_thread_no (l4_my_global_id ()) + 1,
- l4_version (l4_my_global_id ()));
-
- err = wortel_thread_control (pager_tid, l4_my_global_id (),
- l4_myself (), pager_tid,
- (void *)
- (l4_address (__hurd_startup_data->utcb_area)
- + l4_utcb_size ()));
- if (err)
- {
- printf ("Unable to allocate a thread for the pager thread.\n");
- for (;;)
- ;
- }
-
-
- hurd_mm_init (pager_tid);
-
- l4_set_pager (pager_tid);
-
- argc = 1;
- argv = alloca (sizeof (char *) * 2);
- argv[0] = program_name;
- argv[1] = 0;
-
- /* Now invoke the main function. */
- main (argc, argv);
-
- /* Never reached. */
-}
-
-
-#define __thread_stack_pointer() ({ \
- void *__sp__; \
- __asm__ ("movl %%esp, %0" : "=r" (__sp__)); \
- __sp__; \
-})
-
-
-#define __thread_set_stack_pointer(sp) ({ \
- __asm__ ("movl %0, %%esp" : : "r" (sp)); \
-})
-
-
-/* Switch execution transparently to thread TO. The thread FROM,
- which must be the current thread, will be halted. */
-void
-switch_thread (l4_thread_id_t from, l4_thread_id_t to)
-{
- void *current_stack;
- /* FIXME: Figure out how much we need. Probably only one return
- address. */
- char small_sub_stack[16];
- unsigned int i;
-
-/* FIXME: FROM is an argument to force gcc to evaluate it before the
- thread switch. Maybe this can be done better, but it's
- magical, so be careful. */
-
- /* Save the machine context. */
- __asm__ __volatile__ ("pusha");
- __asm__ __volatile__ ("pushf");
-
-  /* Start the TO thread.  It will eventually become a clone of our
- thread. */
- current_stack = __thread_stack_pointer ();
- l4_start_sp_ip (to, (l4_word_t) current_stack,
- (l4_word_t) &&thread_switch_entry);
-
- /* We need a bit of extra space on the stack for
- l4_thread_switch. */
- __thread_set_stack_pointer (small_sub_stack + sizeof (small_sub_stack));
-
- /* We can't use while(1), because then gcc will become clever and
- optimize away everything after thread_switch_entry. */
- for (i = 1; i; i++)
- l4_thread_switch (to);
-
- thread_switch_entry:
- /* Restore the machine context. */
- __asm__ __volatile__ ("popf");
- __asm__ __volatile__ ("popa");
-
- /* The thread TO continues here. */
- l4_stop (from);
-}
diff --git a/task/ia32-crt0.S b/task/ia32-crt0.S
deleted file mode 100644
index 397b14d..0000000
--- a/task/ia32-crt0.S
+++ /dev/null
@@ -1,56 +0,0 @@
-/* ia32-crt0.S - Startup code for ia32.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-/* The size of our stack (4KB). */
-#define STACK_SIZE 0x1000
-
- .text
-
- .globl start, _start
-start:
-_start:
- /* The Hurd startup data is the first argument on the stack. */
- movl 4(%esp), %eax
- movl %eax, __hurd_startup_data
-
- /* Initialize the stack pointer. */
- movl $(stack + STACK_SIZE), %esp
-
- /* Reset EFLAGS. */
- pushl $0
- popf
-
- /* Now enter the cmain function. */
- call cmain
-
- /* Not reached. */
-loop: hlt
- jmp loop
-
- /* Our stack area. */
- .comm stack, STACK_SIZE
-
-
- .data
-
- /* This variable holds a pointer to the Hurd startup data. */
- .global __hurd_startup_data
-__hurd_startup_data:
- .long 0
diff --git a/task/malloc-wrap.c b/task/malloc-wrap.c
deleted file mode 100644
index 5f19cab..0000000
--- a/task/malloc-wrap.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* malloc-wrap.c - Doug Lea's malloc for the task server.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-/* Configuration of Doug Lea's malloc. */
-
-#include <errno.h>
-
-#include <l4.h>
-
-#define __STD_C 1
-#define LACKS_UNISTD_H
-#define LACKS_SYS_PARAM_H
-#define LACKS_FCNTL_H
-
-/* We want to use optimized versions of memset and memcpy. */
-#define HAVE_MEMCPY
-
-/* We always use the supplied mmap emulation. */
-#define MORECORE(x) MORECORE_FAILURE
-#define HAVE_MMAP 1
-#define HAVE_MREMAP 0
-#define MMAP_CLEARS 1
-#define malloc_getpagesize l4_min_page_size ()
-#define MMAP_AS_MORECORE_SIZE (16 * malloc_getpagesize)
-#define DEFAULT_MMAP_THRESHOLD (4 * malloc_getpagesize)
-#define USE_MALLOC_LOCK 1
-
-/* Suppress debug output in mstats(). */
-#define fprintf(...)
-
-/* Now include Doug Lea's malloc. */
-#include "malloc.c"
diff --git a/task/malloc.c b/task/malloc.c
deleted file mode 100644
index ca9ca25..0000000
--- a/task/malloc.c
+++ /dev/null
@@ -1,5567 +0,0 @@
-/*
- This is a version (aka dlmalloc) of malloc/free/realloc written by
- Doug Lea and released to the public domain. Use, modify, and
- redistribute this code without permission or acknowledgement in any
- way you wish. Send questions, comments, complaints, performance
- data, etc to dl@cs.oswego.edu
-
-* VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
-
- Note: There may be an updated version of this malloc obtainable at
- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
- Check before installing!
-
-* Quickstart
-
- This library is all in one file to simplify the most common usage:
- ftp it, compile it (-O), and link it into another program. All
- of the compile-time options default to reasonable values for use on
- most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
- You might later want to step through various compile-time and dynamic
- tuning options.
-
- For convenience, an include file for code using this malloc is at:
- ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
- You don't really need this .h file unless you call functions not
- defined in your system include files. The .h file contains only the
- excerpts from this file needed for using this malloc on ANSI C/C++
- systems, so long as you haven't changed compile-time options about
- naming and tuning parameters. If you do, then you can create your
- own malloc.h that does include all settings by cutting at the point
- indicated below.
-
-* Why use this malloc?
-
- This is not the fastest, most space-conserving, most portable, or
- most tunable malloc ever written. However it is among the fastest
- while also being among the most space-conserving, portable and tunable.
- Consistent balance across these factors results in a good general-purpose
- allocator for malloc-intensive programs.
-
- The main properties of the algorithms are:
- * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
- with ties normally decided via FIFO (i.e. least recently used).
- * For small (<= 64 bytes by default) requests, it is a caching
- allocator, that maintains pools of quickly recycled chunks.
- * In between, and for combinations of large and small requests, it does
- the best it can trying to meet both goals at once.
- * For very large requests (>= 128KB by default), it relies on system
- memory mapping facilities, if supported.
-
- For a longer but slightly out of date high-level description, see
- http://gee.cs.oswego.edu/dl/html/malloc.html
-
- You may already by default be using a C library containing a malloc
- that is based on some version of this malloc (for example in
- linux). You might still want to use the one in this file in order to
- customize settings or to avoid overheads associated with library
- versions.
-
-* Contents, described in more detail in "description of public routines" below.
-
- Standard (ANSI/SVID/...) functions:
- malloc(size_t n);
- calloc(size_t n_elements, size_t element_size);
- free(Void_t* p);
- realloc(Void_t* p, size_t n);
- memalign(size_t alignment, size_t n);
- valloc(size_t n);
- mallinfo()
- mallopt(int parameter_number, int parameter_value)
-
- Additional functions:
- independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
- independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
- pvalloc(size_t n);
- cfree(Void_t* p);
- malloc_trim(size_t pad);
- malloc_usable_size(Void_t* p);
- malloc_stats();
-
-* Vital statistics:
-
- Supported pointer representation: 4 or 8 bytes
- Supported size_t representation: 4 or 8 bytes
- Note that size_t is allowed to be 4 bytes even if pointers are 8.
- You can adjust this by defining INTERNAL_SIZE_T
-
- Alignment: 2 * sizeof(size_t) (default)
- (i.e., 8 byte alignment with 4byte size_t). This suffices for
- nearly all current machines and C compilers. However, you can
- define MALLOC_ALIGNMENT to be wider than this if necessary.
-
- Minimum overhead per allocated chunk: 4 or 8 bytes
- Each malloced chunk has a hidden word of overhead holding size
- and status information.
-
- Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
- 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
-
- When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
- ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
- needed; 4 (8) for a trailing size field and 8 (16) bytes for
- free list pointers. Thus, the minimum allocatable size is
- 16/24/32 bytes.
-
- Even a request for zero bytes (i.e., malloc(0)) returns a
- pointer to something of the minimum allocatable size.
-
- The maximum overhead wastage (i.e., number of extra bytes
- allocated than were requested in malloc) is less than or equal
- to the minimum size, except for requests >= mmap_threshold that
- are serviced via mmap(), where the worst case wastage is 2 *
- sizeof(size_t) bytes plus the remainder from a system page (the
- minimal mmap unit); typically 4096 or 8192 bytes.
-
- Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
- 8-byte size_t: 2^64 minus about two pages
-
- It is assumed that (possibly signed) size_t values suffice to
- represent chunk sizes. `Possibly signed' is due to the fact
- that `size_t' may be defined on a system as either a signed or
- an unsigned type. The ISO C standard says that it must be
- unsigned, but a few systems are known not to adhere to this.
- Additionally, even when size_t is unsigned, sbrk (which is by
- default used to obtain memory from system) accepts signed
- arguments, and may not be able to handle size_t-wide arguments
- with negative sign bit. Generally, values that would
- appear as negative after accounting for overhead and alignment
- are supported only via mmap(), which does not have this
- limitation.
-
- Requests for sizes outside the allowed range will perform an optional
- failure action and then return null. (Requests may also
-  failure action and then return null. (Requests may also
-  fail because a system is out of memory.)
- Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
-
- When USE_MALLOC_LOCK is defined, wrappers are created to
- surround every public call with either a pthread mutex or
- a win32 spinlock (depending on WIN32). This is not
- especially fast, and can be a major bottleneck.
- It is designed only to provide minimal protection
- in concurrent environments, and to provide a basis for
- extensions. If you are using malloc in a concurrent program,
- you would be far better off obtaining ptmalloc, which is
- derived from a version of this malloc, and is well-tuned for
- concurrent programs. (See http://www.malloc.de) Note that
-  even when USE_MALLOC_LOCK is defined, you can guarantee
- full thread-safety only if no threads acquire memory through
- direct calls to MORECORE or other system-level allocators.
-
- Compliance: I believe it is compliant with the 1997 Single Unix Specification
- (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
- others as well.
-
-* Synopsis of compile-time options:
-
- People have reported using previous versions of this malloc on all
- versions of Unix, sometimes by tweaking some of the defines
- below. It has been tested most extensively on Solaris and
- Linux. It is also reported to work on WIN32 platforms.
- People also report using it in stand-alone embedded systems.
-
- The implementation is in straight, hand-tuned ANSI C. It is not
- at all modular. (Sorry!) It uses a lot of macros. To be at all
- usable, this code should be compiled using an optimizing compiler
- (for example gcc -O3) that can simplify expressions and control
- paths. (FAQ: some macros import variables as arguments rather than
- declare locals because people reported that some debuggers
- otherwise get confused.)
-
- OPTION DEFAULT VALUE
-
- Compilation Environment options:
-
- __STD_C derived from C compiler defines
- WIN32 NOT defined
- HAVE_MEMCPY defined
- USE_MEMCPY 1 if HAVE_MEMCPY is defined
- HAVE_MMAP defined as 1
- MMAP_CLEARS 1
- HAVE_MREMAP 0 unless linux defined
- malloc_getpagesize derived from system #includes, or 4096 if not
- HAVE_USR_INCLUDE_MALLOC_H NOT defined
- LACKS_UNISTD_H NOT defined unless WIN32
- LACKS_SYS_PARAM_H NOT defined unless WIN32
- LACKS_SYS_MMAN_H NOT defined unless WIN32
- LACKS_FCNTL_H NOT defined
-
- Changing default word sizes:
-
- INTERNAL_SIZE_T size_t
- MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
- PTR_UINT unsigned long
- CHUNK_SIZE_T unsigned long
-
- Configuration and functionality options:
-
- USE_DL_PREFIX NOT defined
- USE_PUBLIC_MALLOC_WRAPPERS NOT defined
- USE_MALLOC_LOCK NOT defined
- DEBUG NOT defined
- REALLOC_ZERO_BYTES_FREES NOT defined
- MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
- TRIM_FASTBINS 0
- FIRST_SORTED_BIN_SIZE 512
-
- Options for customizing MORECORE:
-
- MORECORE sbrk
- MORECORE_CONTIGUOUS 1
- MORECORE_CANNOT_TRIM NOT defined
- MMAP_AS_MORECORE_SIZE (1024 * 1024)
-
- Tuning options that are also dynamically changeable via mallopt:
-
- DEFAULT_MXFAST 64
- DEFAULT_TRIM_THRESHOLD 256 * 1024
- DEFAULT_TOP_PAD 0
- DEFAULT_MMAP_THRESHOLD 256 * 1024
- DEFAULT_MMAP_MAX 65536
-
- There are several other #defined constants and macros that you
- probably don't want to touch unless you are extending or adapting malloc.
-*/
-
-/*
- WIN32 sets up defaults for MS environment and compilers.
- Otherwise defaults are for unix.
-*/
-
-/* #define WIN32 */
-
-#ifdef WIN32
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-/* Win32 doesn't supply or need the following headers */
-#define LACKS_UNISTD_H
-#define LACKS_SYS_PARAM_H
-#define LACKS_SYS_MMAN_H
-
-/* Use the supplied emulation of sbrk */
-#define MORECORE sbrk
-#define MORECORE_CONTIGUOUS 1
-#define MORECORE_FAILURE ((void*)(-1))
-
-/* Use the supplied emulation of mmap and munmap */
-#define HAVE_MMAP 1
-#define MUNMAP_FAILURE (-1)
-#define MMAP_CLEARS 1
-
-/* These values don't really matter in windows mmap emulation */
-#define MAP_PRIVATE 1
-#define MAP_ANONYMOUS 2
-#define PROT_READ 1
-#define PROT_WRITE 2
-
-/* Emulation functions defined at the end of this file */
-
-/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
-#ifdef USE_MALLOC_LOCK
-static int slwait(int *sl);
-static int slrelease(int *sl);
-#endif
-
-static long getpagesize(void);
-static long getregionsize(void);
-static void *sbrk(long size);
-static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
-static long munmap(void *ptr, long size);
-
-static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed);
-static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user);
-
-#endif
-
-/*
- __STD_C should be nonzero if using ANSI-standard C compiler, a C++
- compiler, or a C compiler sufficiently close to ANSI to get away
- with it.
-*/
-
-#ifndef __STD_C
-#if defined(__STDC__) || defined(_cplusplus)
-#define __STD_C 1
-#else
-#define __STD_C 0
-#endif
-#endif /*__STD_C*/
-
-
-/*
- Void_t* is the pointer type that malloc should say it returns
-*/
-
-#ifndef Void_t
-#if (__STD_C || defined(WIN32))
-#define Void_t void
-#else
-#define Void_t char
-#endif
-#endif /*Void_t*/
-
-#if __STD_C
-#include <stddef.h> /* for size_t */
-#else
-#include <sys/types.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
-
-/* #define LACKS_UNISTD_H */
-
-#ifndef LACKS_UNISTD_H
-#include <unistd.h>
-#endif
-
-/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
-
-/* #define LACKS_SYS_PARAM_H */
-
-
-#include <stdio.h> /* needed for malloc_stats */
-#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
-
-
-/*
- Debugging:
-
- Because freed chunks may be overwritten with bookkeeping fields, this
- malloc will often die when freed memory is overwritten by user
- programs. This can be very effective (albeit in an annoying way)
- in helping track down dangling pointers.
-
- If you compile with -DDEBUG, a number of assertion checks are
- enabled that will catch more memory errors. You probably won't be
- able to make much sense of the actual assertion errors, but they
- should help you locate incorrectly overwritten memory. The
- checking is fairly extensive, and will slow down execution
- noticeably. Calling malloc_stats or mallinfo with DEBUG set will
- attempt to check every non-mmapped allocated and free chunk in the
-  course of computing the summaries.  (By nature, mmapped regions
- cannot be checked very much automatically.)
-
- Setting DEBUG may also be helpful if you are trying to modify
- this code. The assertions in the check routines spell out in more
- detail the assumptions and invariants underlying the algorithms.
-
- Setting DEBUG does NOT provide an automated mechanism for checking
- that all accesses to malloced memory stay within their
- bounds. However, there are several add-ons and adaptations of this
- or other mallocs available that do this.
-*/
-
-#if DEBUG
-#include <assert.h>
-#else
-#define assert(x) ((void)0)
-#endif
-
-/*
- The unsigned integer type used for comparing any two chunk sizes.
- This should be at least as wide as size_t, but should not be signed.
-*/
-
-#ifndef CHUNK_SIZE_T
-#define CHUNK_SIZE_T unsigned long
-#endif
-
-/*
-  The unsigned integer type used to hold addresses when they are
- manipulated as integers. Except that it is not defined on all
- systems, intptr_t would suffice.
-*/
-#ifndef PTR_UINT
-#define PTR_UINT unsigned long
-#endif
-
-
-/*
- INTERNAL_SIZE_T is the word-size used for internal bookkeeping
- of chunk sizes.
-
- The default version is the same as size_t.
-
- While not strictly necessary, it is best to define this as an
- unsigned type, even if size_t is a signed type. This may avoid some
- artificial size limitations on some systems.
-
- On a 64-bit machine, you may be able to reduce malloc overhead by
- defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
- expense of not being able to handle more than 2^32 of malloced
- space. If this limitation is acceptable, you are encouraged to set
- this unless you are on a platform requiring 16byte alignments. In
- this case the alignment requirements turn out to negate any
- potential advantages of decreasing size_t word size.
-
- Implementors: Beware of the possible combinations of:
- - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
- and might be the same width as int or as long
- - size_t might have different width and signedness as INTERNAL_SIZE_T
- - int and long might be 32 or 64 bits, and might be the same width
- To deal with this, most comparisons and difference computations
- among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
- aware of the fact that casting an unsigned int to a wider long does
- not sign-extend. (This also makes checking for negative numbers
- awkward.) Some of these casts result in harmless compiler warnings
- on some systems.
-*/
-
-#ifndef INTERNAL_SIZE_T
-#define INTERNAL_SIZE_T size_t
-#endif
-
-/* The corresponding word size */
-#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
-
-
-
-/*
- MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
- It must be a power of two at least 2 * SIZE_SZ, even on machines
- for which smaller alignments would suffice. It may be defined as
- larger than this though. Note however that code and data structures
- are optimized for the case of 8-byte alignment.
-*/
-
-
-#ifndef MALLOC_ALIGNMENT
-#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
-#endif
-
-/* The corresponding bit mask value */
-#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
-
-
-
-/*
- REALLOC_ZERO_BYTES_FREES should be set if a call to
- realloc with zero bytes should be the same as a call to free.
- Some people think it should. Otherwise, since this malloc
- returns a unique pointer for malloc(0), so does realloc(p, 0).
-*/
-
-/* #define REALLOC_ZERO_BYTES_FREES */
-
-/*
- TRIM_FASTBINS controls whether free() of a very small chunk can
- immediately lead to trimming. Setting to true (1) can reduce memory
- footprint, but will almost always slow down programs that use a lot
- of small chunks.
-
- Define this only if you are willing to give up some speed to more
- aggressively reduce system-level memory footprint when releasing
- memory in programs that use many small chunks. You can get
- essentially the same effect by setting MXFAST to 0, but this can
- lead to even greater slowdowns in programs using many small chunks.
- TRIM_FASTBINS is an in-between compile-time option, that disables
- only those chunks bordering topmost memory from being placed in
- fastbins.
-*/
-
-#ifndef TRIM_FASTBINS
-#define TRIM_FASTBINS 0
-#endif
-
-
-/*
- USE_DL_PREFIX will prefix all public routines with the string 'dl'.
- This is necessary when you only want to use this malloc in one part
- of a program, using your regular system malloc elsewhere.
-*/
-
-/* #define USE_DL_PREFIX */
-
-
-/*
- USE_MALLOC_LOCK causes wrapper functions to surround each
- callable routine with pthread mutex lock/unlock.
-
- USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
-*/
-
-
-/* #define USE_MALLOC_LOCK */
-
-
-/*
- If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
- actually a wrapper function that first calls MALLOC_PREACTION, then
- calls the internal routine, and follows it with
- MALLOC_POSTACTION. This is needed for locking, but you can also use
- this, without USE_MALLOC_LOCK, for purposes of interception,
- instrumentation, etc. It is a sad fact that using wrappers often
- noticeably degrades performance of malloc-intensive programs.
-*/
-
-#ifdef USE_MALLOC_LOCK
-#define USE_PUBLIC_MALLOC_WRAPPERS
-#else
-/* #define USE_PUBLIC_MALLOC_WRAPPERS */
-#endif
-
-
-/*
- Two-phase name translation.
- All of the actual routines are given mangled names.
- When wrappers are used, they become the public callable versions.
- When DL_PREFIX is used, the callable names are prefixed.
-*/
-
-#ifndef USE_PUBLIC_MALLOC_WRAPPERS
-#define cALLOc public_cALLOc
-#define fREe public_fREe
-#define cFREe public_cFREe
-#define mALLOc public_mALLOc
-#define mEMALIGn public_mEMALIGn
-#define rEALLOc public_rEALLOc
-#define vALLOc public_vALLOc
-#define pVALLOc public_pVALLOc
-#define mALLINFo public_mALLINFo
-#define mALLOPt public_mALLOPt
-#define mTRIm public_mTRIm
-#define mSTATs public_mSTATs
-#define mUSABLe public_mUSABLe
-#define iCALLOc public_iCALLOc
-#define iCOMALLOc public_iCOMALLOc
-#endif
-
-#ifdef USE_DL_PREFIX
-#define public_cALLOc dlcalloc
-#define public_fREe dlfree
-#define public_cFREe dlcfree
-#define public_mALLOc dlmalloc
-#define public_mEMALIGn dlmemalign
-#define public_rEALLOc dlrealloc
-#define public_vALLOc dlvalloc
-#define public_pVALLOc dlpvalloc
-#define public_mALLINFo dlmallinfo
-#define public_mALLOPt dlmallopt
-#define public_mTRIm dlmalloc_trim
-#define public_mSTATs dlmalloc_stats
-#define public_mUSABLe dlmalloc_usable_size
-#define public_iCALLOc dlindependent_calloc
-#define public_iCOMALLOc dlindependent_comalloc
-#else /* USE_DL_PREFIX */
-#define public_cALLOc calloc
-#define public_fREe free
-#define public_cFREe cfree
-#define public_mALLOc malloc
-#define public_mEMALIGn memalign
-#define public_rEALLOc realloc
-#define public_vALLOc valloc
-#define public_pVALLOc pvalloc
-#define public_mALLINFo mallinfo
-#define public_mALLOPt mallopt
-#define public_mTRIm malloc_trim
-#define public_mSTATs malloc_stats
-#define public_mUSABLe malloc_usable_size
-#define public_iCALLOc independent_calloc
-#define public_iCOMALLOc independent_comalloc
-#endif /* USE_DL_PREFIX */
-
-
-/*
- HAVE_MEMCPY should be defined if you are not otherwise using
- ANSI STD C, but still have memcpy and memset in your C library
- and want to use them in calloc and realloc. Otherwise simple
- macro versions are defined below.
-
- USE_MEMCPY should be defined as 1 if you actually want to
- have memset and memcpy called. People report that the macro
- versions are faster than libc versions on some systems.
-
- Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
- (of <= 36 bytes) are manually unrolled in realloc and calloc.
-*/
-
-#define HAVE_MEMCPY
-
-#ifndef USE_MEMCPY
-#ifdef HAVE_MEMCPY
-#define USE_MEMCPY 1
-#else
-#define USE_MEMCPY 0
-#endif
-#endif
-
-
-#if (__STD_C || defined(HAVE_MEMCPY))
-
-#ifdef WIN32
-/* On Win32 memset and memcpy are already declared in windows.h */
-#else
-#if __STD_C
-void* memset(void*, int, size_t);
-void* memcpy(void*, const void*, size_t);
-#else
-Void_t* memset();
-Void_t* memcpy();
-#endif
-#endif
-#endif
-
-/*
- MALLOC_FAILURE_ACTION is the action to take before "return 0" when
- malloc fails to be able to return memory, either because memory is
- exhausted or because of illegal arguments.
-
- By default, sets errno if running on STD_C platform, else does nothing.
-*/
-
-#ifndef MALLOC_FAILURE_ACTION
-#if __STD_C
-#define MALLOC_FAILURE_ACTION \
- errno = ENOMEM;
-
-#else
-#define MALLOC_FAILURE_ACTION
-#endif
-#endif
-
-/*
- MORECORE-related declarations. By default, rely on sbrk
-*/
-
-
-#ifdef LACKS_UNISTD_H
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
-#if __STD_C
-extern Void_t* sbrk(ptrdiff_t);
-#else
-extern Void_t* sbrk();
-#endif
-#endif
-#endif
-
-/*
- MORECORE is the name of the routine to call to obtain more memory
- from the system. See below for general guidance on writing
- alternative MORECORE functions, as well as a version for WIN32 and a
- sample version for pre-OSX macos.
-*/
-
-#ifndef MORECORE
-#define MORECORE sbrk
-#endif
-
-/*
- MORECORE_FAILURE is the value returned upon failure of MORECORE
- as well as mmap. Since it cannot be an otherwise valid memory address,
- and must reflect values of standard sys calls, you probably ought not
- try to redefine it.
-*/
-
-#ifndef MORECORE_FAILURE
-#define MORECORE_FAILURE (-1)
-#endif
-
-/*
- If MORECORE_CONTIGUOUS is true, take advantage of fact that
- consecutive calls to MORECORE with positive arguments always return
- contiguous increasing addresses. This is true of unix sbrk. Even
- if not defined, when regions happen to be contiguous, malloc will
- permit allocations spanning regions obtained from different
- calls. But defining this when applicable enables some stronger
- consistency checks and space efficiencies.
-*/
-
-#ifndef MORECORE_CONTIGUOUS
-#define MORECORE_CONTIGUOUS 1
-#endif
-
-/*
- Define MORECORE_CANNOT_TRIM if your version of MORECORE
- cannot release space back to the system when given negative
- arguments. This is generally necessary only if you are using
- a hand-crafted MORECORE function that cannot handle negative arguments.
-*/
-
-/* #define MORECORE_CANNOT_TRIM */
-
-
-/*
- Define HAVE_MMAP as true to optionally make malloc() use mmap() to
- allocate very large blocks. These will be returned to the
- operating system immediately after a free(). Also, if mmap
- is available, it is used as a backup strategy in cases where
- MORECORE fails to provide space from system.
-
- This malloc is best tuned to work with mmap for large requests.
- If you do not have mmap, operations involving very large chunks (1MB
- or so) may be slower than you'd like.
-*/
-
-#ifndef HAVE_MMAP
-#define HAVE_MMAP 1
-#endif
-
-#if HAVE_MMAP
-/*
- Standard unix mmap using /dev/zero clears memory so calloc doesn't
- need to.
-*/
-
-#ifndef MMAP_CLEARS
-#define MMAP_CLEARS 1
-#endif
-
-#else /* no mmap */
-#ifndef MMAP_CLEARS
-#define MMAP_CLEARS 0
-#endif
-#endif
-
-
-/*
- MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
- sbrk fails, and mmap is used as a backup (which is done only if
- HAVE_MMAP). The value must be a multiple of page size. This
- backup strategy generally applies only when systems have "holes" in
- address space, so sbrk cannot perform contiguous expansion, but
- there is still space available on system. On systems for which
- this is known to be useful (i.e. most linux kernels), this occurs
- only when programs allocate huge amounts of memory. Between this,
- and the fact that mmap regions tend to be limited, the size should
- be large, to avoid too many mmap calls and thus avoid running out
- of kernel resources.
-*/
-
-#ifndef MMAP_AS_MORECORE_SIZE
-#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
-#endif
-
-/*
- Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
- large blocks. This is currently only possible on Linux with
- kernel versions newer than 1.3.77.
-*/
-
-#ifndef HAVE_MREMAP
-#ifdef linux
-#define HAVE_MREMAP 1
-#else
-#define HAVE_MREMAP 0
-#endif
-
-#endif /* HAVE_MREMAP */
-
-
-/*
- The system page size. To the extent possible, this malloc manages
- memory from the system in page-size units. Note that this value is
- cached during initialization into a field of malloc_state. So even
- if malloc_getpagesize is a function, it is only called once.
-
- The following mechanics for getpagesize were adapted from bsd/gnu
- getpagesize.h. If none of the system-probes here apply, a value of
- 4096 is used, which should be OK: If they don't apply, then using
- the actual value probably doesn't impact performance.
-*/
-
-
-#ifndef malloc_getpagesize
-
-#ifndef LACKS_UNISTD_H
-# include <unistd.h>
-#endif
-
-# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
-# ifndef _SC_PAGE_SIZE
-# define _SC_PAGE_SIZE _SC_PAGESIZE
-# endif
-# endif
-
-# ifdef _SC_PAGE_SIZE
-# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
-# else
-# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
- extern size_t getpagesize();
-# define malloc_getpagesize getpagesize()
-# else
-# ifdef WIN32 /* use supplied emulation of getpagesize */
-# define malloc_getpagesize getpagesize()
-# else
-# ifndef LACKS_SYS_PARAM_H
-# include <sys/param.h>
-# endif
-# ifdef EXEC_PAGESIZE
-# define malloc_getpagesize EXEC_PAGESIZE
-# else
-# ifdef NBPG
-# ifndef CLSIZE
-# define malloc_getpagesize NBPG
-# else
-# define malloc_getpagesize (NBPG * CLSIZE)
-# endif
-# else
-# ifdef NBPC
-# define malloc_getpagesize NBPC
-# else
-# ifdef PAGESIZE
-# define malloc_getpagesize PAGESIZE
-# else /* just guess */
-# define malloc_getpagesize (4096)
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-#endif
-
-/*
- This version of malloc supports the standard SVID/XPG mallinfo
- routine that returns a struct containing usage properties and
- statistics. It should work on any SVID/XPG compliant system that has
- a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
- install such a thing yourself, cut out the preliminary declarations
- as described above and below and save them in a malloc.h file. But
- there's no compelling reason to bother to do this.)
-
- The main declaration needed is the mallinfo struct that is returned
-  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
- bunch of fields that are not even meaningful in this version of
-  malloc. These fields are instead filled by mallinfo() with
- other numbers that might be of interest.
-
- HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
- /usr/include/malloc.h file that includes a declaration of struct
- mallinfo. If so, it is included; else an SVID2/XPG2 compliant
- version is declared below. These must be precisely the same for
- mallinfo() to work. The original SVID version of this struct,
- defined on most systems with mallinfo, declares all fields as
- ints. But some others define as unsigned long. If your system
- defines the fields using a type of different width than listed here,
- you must #include your system version and #define
- HAVE_USR_INCLUDE_MALLOC_H.
-*/
-
-/* #define HAVE_USR_INCLUDE_MALLOC_H */
-
-#ifdef HAVE_USR_INCLUDE_MALLOC_H
-#include "/usr/include/malloc.h"
-#else
-
-/* SVID2/XPG mallinfo structure */
-
-struct mallinfo {
- int arena; /* non-mmapped space allocated from system */
- int ordblks; /* number of free chunks */
- int smblks; /* number of fastbin blocks */
- int hblks; /* number of mmapped regions */
- int hblkhd; /* space in mmapped regions */
- int usmblks; /* maximum total allocated space */
- int fsmblks; /* space available in freed fastbin blocks */
- int uordblks; /* total allocated space */
- int fordblks; /* total free space */
- int keepcost; /* top-most, releasable (via malloc_trim) space */
-};
-
-/*
- SVID/XPG defines four standard parameter numbers for mallopt,
- normally defined in malloc.h. Only one of these (M_MXFAST) is used
- in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
- so setting them has no effect. But this malloc also supports other
- options in mallopt described below.
-*/
-#endif
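
As a usage illustration of the statistics structure above (an editor's sketch, not part of the original file), the SVID mallinfo() call can be exercised as follows on a C library that ships <malloc.h>; the interface is considered legacy on modern systems.

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = malloc (100 * 1024);
  struct mallinfo mi = mallinfo ();

  /* A few of the fields described above; all are plain ints in the
     SVID layout.  */
  printf ("arena=%d uordblks=%d fordblks=%d hblkhd=%d\n",
          mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd);

  free (p);
  return 0;
}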
-
-
-/* ---------- description of public routines ------------ */
-
-/*
- malloc(size_t n)
- Returns a pointer to a newly allocated chunk of at least n bytes, or null
- if no space is available. Additionally, on failure, errno is
- set to ENOMEM on ANSI C systems.
-
-  If n is zero, malloc returns a minimum-sized chunk. (The minimum
- size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
- systems.) On most systems, size_t is an unsigned type, so calls
- with negative arguments are interpreted as requests for huge amounts
- of space, which will often fail. The maximum supported value of n
- differs across systems, but is in all cases less than the maximum
- representable value of a size_t.
-*/
-#if __STD_C
-Void_t* public_mALLOc(size_t);
-#else
-Void_t* public_mALLOc();
-#endif
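
A minimal caller-side sketch (editor's addition) of the failure contract just described: a null return with errno set to ENOMEM on ANSI systems.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = malloc (1024);
  if (p == NULL)
    {
      /* MALLOC_FAILURE_ACTION sets errno to ENOMEM on __STD_C systems.  */
      fprintf (stderr, "malloc failed: %s\n",
               errno == ENOMEM ? "out of memory" : "unknown error");
      return 1;
    }
  free (p);
  return 0;
}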
-
-/*
- free(Void_t* p)
- Releases the chunk of memory pointed to by p, that had been previously
- allocated using malloc or a related routine such as realloc.
- It has no effect if p is null. It can have arbitrary (i.e., bad!)
- effects if p has already been freed.
-
-  Unless disabled (using mallopt), freeing very large spaces will,
- when possible, automatically trigger operations that give
- back unused memory to the system, thus reducing program footprint.
-*/
-#if __STD_C
-void public_fREe(Void_t*);
-#else
-void public_fREe();
-#endif
-
-/*
- calloc(size_t n_elements, size_t element_size);
- Returns a pointer to n_elements * element_size bytes, with all locations
- set to zero.
-*/
-#if __STD_C
-Void_t* public_cALLOc(size_t, size_t);
-#else
-Void_t* public_cALLOc();
-#endif
-
-/*
- realloc(Void_t* p, size_t n)
- Returns a pointer to a chunk of size n that contains the same data
- as does chunk p up to the minimum of (n, p's size) bytes, or null
- if no space is available.
-
- The returned pointer may or may not be the same as p. The algorithm
- prefers extending p when possible, otherwise it employs the
- equivalent of a malloc-copy-free sequence.
-
- If p is null, realloc is equivalent to malloc.
-
- If space is not available, realloc returns null, errno is set (if on
- ANSI) and p is NOT freed.
-
- If n is for fewer bytes than already held by p, the newly unused
- space is lopped off and freed if possible. Unless the #define
- REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
- zero (re)allocates a minimum-sized chunk.
-
- Large chunks that were internally obtained via mmap will always
- be reallocated using malloc-copy-free sequences unless
- the system supports MREMAP (currently only linux).
-
- The old unix realloc convention of allowing the last-free'd chunk
- to be used as an argument to realloc is not supported.
-*/
-#if __STD_C
-Void_t* public_rEALLOc(Void_t*, size_t);
-#else
-Void_t* public_rEALLOc();
-#endif
-
-/*
- memalign(size_t alignment, size_t n);
- Returns a pointer to a newly allocated chunk of n bytes, aligned
- in accord with the alignment argument.
-
- The alignment argument should be a power of two. If the argument is
- not a power of two, the nearest greater power is used.
- 8-byte alignment is guaranteed by normal malloc calls, so don't
- bother calling memalign with an argument of 8 or less.
-
- Overreliance on memalign is a sure way to fragment space.
-*/
-#if __STD_C
-Void_t* public_mEMALIGn(size_t, size_t);
-#else
-Void_t* public_mEMALIGn();
-#endif
-
-/*
- valloc(size_t n);
- Equivalent to memalign(pagesize, n), where pagesize is the page
- size of the system. If the pagesize is unknown, 4096 is used.
-*/
-#if __STD_C
-Void_t* public_vALLOc(size_t);
-#else
-Void_t* public_vALLOc();
-#endif
-
-
-
-/*
- mallopt(int parameter_number, int parameter_value)
- Sets tunable parameters. The format is to provide a
- (parameter-number, parameter-value) pair. mallopt then sets the
- corresponding parameter to the argument value if it can (i.e., so
- long as the value is meaningful), and returns 1 if successful else
- 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
- normally defined in malloc.h. Only one of these (M_MXFAST) is used
- in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
- so setting them has no effect. But this malloc also supports four
- other options in mallopt. See below for details. Briefly, supported
- parameters are as follows (listed defaults are for "typical"
- configurations).
-
- Symbol param # default allowed param values
- M_MXFAST 1 64 0-80 (0 disables fastbins)
- M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
- M_TOP_PAD -2 0 any
- M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
- M_MMAP_MAX -4 65536 any (0 disables use of mmap)
-*/
-#if __STD_C
-int public_mALLOPt(int, int);
-#else
-int public_mALLOPt();
-#endif
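As a concrete illustration of the tuning interface just described, here is a
standalone sketch (not part of the original malloc.c); it assumes the
conventional public name mallopt and the M_* parameter numbers defined later
in this file, as exposed by <malloc.h> on glibc-style systems:

#include <malloc.h>   /* assumed to declare mallopt() and the M_* symbols */
#include <stdio.h>

int main (void)
{
  /* Disable fastbins entirely; allocation approximates pure best-fit. */
  if (mallopt (M_MXFAST, 0) == 0)
    fprintf (stderr, "mallopt(M_MXFAST) was not honored\n");

  /* Release unused top-of-heap memory once 128K accumulates. */
  mallopt (M_TRIM_THRESHOLD, 128 * 1024);

  /* Hand requests of 1MB or more directly to mmap. */
  mallopt (M_MMAP_THRESHOLD, 1024 * 1024);

  return 0;
}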
-
-
-/*
- mallinfo()
- Returns (by copy) a struct containing various summary statistics:
-
- arena: current total non-mmapped bytes allocated from system
- ordblks: the number of free chunks
- smblks: the number of fastbin blocks (i.e., small chunks that
- have been freed but not yet reused or consolidated)
- hblks: current number of mmapped regions
- hblkhd: total bytes held in mmapped regions
- usmblks: the maximum total allocated space. This will be greater
- than current total if trimming has occurred.
- fsmblks: total bytes held in fastbin blocks
- uordblks: current total allocated space (normal or mmapped)
- fordblks: total free space
- keepcost: the maximum number of bytes that could ideally be released
- back to system via malloc_trim. ("ideally" means that
- it ignores page restrictions etc.)
-
- Because these fields are ints, but internal bookkeeping may
- be kept as longs, the reported values may wrap around zero and
- thus be inaccurate.
-*/
-#if __STD_C
-struct mallinfo public_mALLINFo(void);
-#else
-struct mallinfo public_mALLINFo();
-#endif
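A standalone sketch (not part of the original malloc.c) showing how the
statistics above are typically read; it assumes the conventional public names
malloc/free/mallinfo, e.g. as declared by <malloc.h> on glibc-style systems:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (100 * 1024);
  struct mallinfo mi = mallinfo ();

  printf ("non-mmapped bytes from system: %d\n", mi.arena);
  printf ("bytes in mmapped regions:      %d\n", mi.hblkhd);
  printf ("total allocated bytes:         %d\n", mi.uordblks);
  printf ("total free bytes:              %d\n", mi.fordblks);
  printf ("trimmable (releasable) bytes:  %d\n", mi.keepcost);

  free (p);
  return 0;
}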
-
-/*
- independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
-
- independent_calloc is similar to calloc, but instead of returning a
- single cleared space, it returns an array of pointers to n_elements
- independent elements that can hold contents of size elem_size, each
- of which starts out cleared, and can be independently freed,
- realloc'ed etc. The elements are guaranteed to be adjacently
- allocated (this is not guaranteed to occur with multiple callocs or
- mallocs), which may also improve cache locality in some
- applications.
-
- The "chunks" argument is optional (i.e., may be null, which is
- probably the most typical usage). If it is null, the returned array
- is itself dynamically allocated and should also be freed when it is
- no longer needed. Otherwise, the chunks array must be of at least
- n_elements in length. It is filled in with the pointers to the
- chunks.
-
- In either case, independent_calloc returns this pointer array, or
- null if the allocation failed. If n_elements is zero and "chunks"
- is null, it returns a chunk representing an array with zero elements
- (which should be freed if not wanted).
-
- Each element must be individually freed when it is no longer
- needed. If you'd like to instead be able to free all at once, you
- should instead use regular calloc and assign pointers into this
- space to represent elements. (In this case though, you cannot
- independently free elements.)
-
- independent_calloc simplifies and speeds up implementations of many
- kinds of pools. It may also be useful when constructing large data
- structures that initially have a fixed number of fixed-sized nodes,
- but the number is not known at compile time, and some of the nodes
- may later need to be freed. For example:
-
- struct Node { int item; struct Node* next; };
-
- struct Node* build_list() {
- struct Node** pool;
- int i, n = read_number_of_nodes_needed();
- if (n <= 0) return 0;
- pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
- if (pool == 0) die();
- // organize into a linked list...
- struct Node* first = pool[0];
- for (i = 0; i < n-1; ++i)
- pool[i]->next = pool[i+1];
- free(pool); // Can now free the array (or not, if it is needed later)
- return first;
- }
-*/
-#if __STD_C
-Void_t** public_iCALLOc(size_t, size_t, Void_t**);
-#else
-Void_t** public_iCALLOc();
-#endif
-
-/*
- independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
-
- independent_comalloc allocates, all at once, a set of n_elements
- chunks with sizes indicated in the "sizes" array. It returns
- an array of pointers to these elements, each of which can be
- independently freed, realloc'ed etc. The elements are guaranteed to
- be adjacently allocated (this is not guaranteed to occur with
- multiple callocs or mallocs), which may also improve cache locality
- in some applications.
-
- The "chunks" argument is optional (i.e., may be null). If it is null
- the returned array is itself dynamically allocated and should also
- be freed when it is no longer needed. Otherwise, the chunks array
- must be of at least n_elements in length. It is filled in with the
- pointers to the chunks.
-
- In either case, independent_comalloc returns this pointer array, or
- null if the allocation failed. If n_elements is zero and chunks is
- null, it returns a chunk representing an array with zero elements
- (which should be freed if not wanted).
-
- Each element must be individually freed when it is no longer
- needed. If you'd like to instead be able to free all at once, you
- should instead use a single regular malloc, and assign pointers at
- particular offsets in the aggregate space. (In this case though, you
- cannot independently free elements.)
-
- independent_comalloc differs from independent_calloc in that each
- element may have a different size, and also that it does not
- automatically clear elements.
-
- independent_comalloc can be used to speed up allocation in cases
- where several structs or objects must always be allocated at the
- same time. For example:
-
- struct Head { ... }
- struct Foot { ... }
-
- void send_message(char* msg) {
- int msglen = strlen(msg);
- size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
- void* chunks[3];
- if (independent_comalloc(3, sizes, chunks) == 0)
- die();
- struct Head* head = (struct Head*)(chunks[0]);
- char* body = (char*)(chunks[1]);
- struct Foot* foot = (struct Foot*)(chunks[2]);
- // ...
- }
-
- In general though, independent_comalloc is worth using only for
- larger values of n_elements. For small values, you probably won't
- detect enough difference from series of malloc calls to bother.
-
- Overuse of independent_comalloc can increase overall memory usage,
- since it cannot reuse existing noncontiguous small chunks that
- might be available for some of the elements.
-*/
-#if __STD_C
-Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
-#else
-Void_t** public_iCOMALLOc();
-#endif
-
-
-/*
- pvalloc(size_t n);
- Equivalent to valloc(minimum-page-that-holds(n)), that is,
- round up n to nearest pagesize.
- */
-#if __STD_C
-Void_t* public_pVALLOc(size_t);
-#else
-Void_t* public_pVALLOc();
-#endif
-
-/*
- cfree(Void_t* p);
- Equivalent to free(p).
-
- cfree is needed/defined on some systems that pair it with calloc,
- for odd historical reasons (such as: cfree is used in example
- code in the first edition of K&R).
-*/
-#if __STD_C
-void public_cFREe(Void_t*);
-#else
-void public_cFREe();
-#endif
-
-/*
- malloc_trim(size_t pad);
-
- If possible, gives memory back to the system (via negative
- arguments to sbrk) if there is unused memory at the `high' end of
- the malloc pool. You can call this after freeing large blocks of
- memory to potentially reduce the system-level memory requirements
- of a program. However, it cannot guarantee to reduce memory. Under
- some allocation patterns, some large free blocks of memory will be
- locked between two used chunks, so they cannot be given back to
- the system.
-
- The `pad' argument to malloc_trim represents the amount of free
- trailing space to leave untrimmed. If this argument is zero,
- only the minimum amount of memory to maintain internal data
- structures will be left (one page or less). Non-zero arguments
- can be supplied to maintain enough trailing space to service
- future expected allocations without having to re-obtain memory
- from the system.
-
- Malloc_trim returns 1 if it actually released any memory, else 0.
- On systems that do not support "negative sbrks", it will always
- return 0.
-*/
-#if __STD_C
-int public_mTRIm(size_t);
-#else
-int public_mTRIm();
-#endif
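For example, a standalone sketch (not part of the original malloc.c) of the
pad argument described above; it assumes the usual public names, with
malloc_trim declared in <malloc.h> as on glibc-style systems:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *big = malloc (8 * 1024 * 1024);
  free (big);

  /* Keep 64K of trailing slack for future requests; give the rest back
     to the system if the freed space sits at the top of the heap. */
  if (malloc_trim (64 * 1024))
    printf ("some memory was returned to the system\n");
  else
    printf ("nothing could be trimmed\n");

  return 0;
}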
-
-/*
- malloc_usable_size(Void_t* p);
-
- Returns the number of bytes you can actually use in
- an allocated chunk, which may be more than you requested (although
- often not) due to alignment and minimum size constraints.
- You can use this many bytes without worrying about
- overwriting other allocated objects. This is not a particularly great
- programming practice. malloc_usable_size can be more useful in
- debugging and assertions, for example:
-
- p = malloc(n);
- assert(malloc_usable_size(p) >= 256);
-
-*/
-#if __STD_C
-size_t public_mUSABLe(Void_t*);
-#else
-size_t public_mUSABLe();
-#endif
-
-/*
- malloc_stats();
- Prints on stderr the amount of space obtained from the system (both
- via sbrk and mmap), the maximum amount (which may be more than
- current if malloc_trim and/or munmap got called), and the current
- number of bytes allocated via malloc (or realloc, etc) but not yet
- freed. Note that this is the number of bytes allocated, not the
- number requested. It will be larger than the number requested
- because of alignment and bookkeeping overhead. Because it includes
- alignment wastage as being in use, this figure may be greater than
- zero even when no user-level chunks are allocated.
-
- The reported current and maximum system memory can be inaccurate if
- a program makes other calls to system memory allocation functions
- (normally sbrk) outside of malloc.
-
- malloc_stats prints only the most commonly interesting statistics.
- More information can be obtained by calling mallinfo.
-
-*/
-#if __STD_C
-void public_mSTATs();
-#else
-void public_mSTATs();
-#endif
-
-/* mallopt tuning options */
-
-/*
- M_MXFAST is the maximum request size used for "fastbins", special bins
- that hold returned chunks without consolidating their spaces. This
- enables future requests for chunks of the same size to be handled
- very quickly, but can increase fragmentation, and thus increase the
- overall memory footprint of a program.
-
- This malloc manages fastbins very conservatively yet still
- efficiently, so fragmentation is rarely a problem for values less
- than or equal to the default. The maximum supported value of MXFAST
- is 80. You wouldn't want it any higher than this anyway. Fastbins
- are designed especially for use with many small structs, objects or
- strings -- the default handles structs/objects/arrays with sizes up
- to 16 4byte fields, or small strings representing words, tokens,
- etc. Using fastbins for larger objects normally worsens
- fragmentation without improving speed.
-
- M_MXFAST is set in REQUEST size units. It is internally used in
- chunksize units, which adds padding and alignment. You can reduce
- M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
- algorithm to be a closer approximation of fifo-best-fit in all cases,
- not just for larger requests, but will generally cause it to be
- slower.
-*/
-
-
-/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
-#ifndef M_MXFAST
-#define M_MXFAST 1
-#endif
-
-#ifndef DEFAULT_MXFAST
-#define DEFAULT_MXFAST 64
-#endif
-
-
-/*
- M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
- to keep before releasing via malloc_trim in free().
-
- Automatic trimming is mainly useful in long-lived programs.
- Because trimming via sbrk can be slow on some systems, and can
- sometimes be wasteful (in cases where programs immediately
- afterward allocate more large chunks), the value should be high
- enough so that your overall system performance would improve by
- releasing this much memory.
-
- The trim threshold and the mmap control parameters (see below)
- can be traded off with one another. Trimming and mmapping are
- two different ways of releasing unused memory back to the
- system. Between these two, it is often possible to keep
- system-level demands of a long-lived program down to a bare
- minimum. For example, in one test suite of sessions measuring
- the XF86 X server on Linux, using a trim threshold of 128K and a
- mmap threshold of 192K led to near-minimal long term resource
- consumption.
-
- If you are using this malloc in a long-lived program, it should
- pay to experiment with these values. As a rough guide, you
- might set it to a value close to the average size of a process
- (program) running on your system. Releasing this much memory
- would allow such a process to run in memory. Generally, it's
- worth it to tune for trimming rather than memory mapping when a
- program undergoes phases where several large chunks are
- allocated and released in ways that can reuse each other's
- storage, perhaps mixed with phases where there are no such
- chunks at all. And in well-behaved long-lived programs,
- controlling release of large blocks via trimming versus mapping
- is usually faster.
-
- However, in most programs, these parameters serve mainly as
- protection against the system-level effects of carrying around
- massive amounts of unneeded memory. Since frequent calls to
- sbrk, mmap, and munmap otherwise degrade performance, the default
- parameters are set to relatively high values that serve only as
- safeguards.
-
- The trim value must be greater than page size to have any useful
- effect. To disable trimming completely, you can set it to
- (unsigned long)(-1).
-
- Trim settings interact with fastbin (MXFAST) settings: Unless
- TRIM_FASTBINS is defined, automatic trimming never takes place upon
- freeing a chunk with size less than or equal to MXFAST. Trimming is
- instead delayed until subsequent freeing of larger chunks. However,
- you can still force an attempted trim by calling malloc_trim.
-
- Also, trimming is not generally possible in cases where
- the main arena is obtained via mmap.
-
- Note that the trick some people use of mallocing a huge space and
- then freeing it at program startup, in an attempt to reserve system
- memory, doesn't have the intended effect under automatic trimming,
- since that memory will immediately be returned to the system.
-*/
-
-#define M_TRIM_THRESHOLD -1
-
-#ifndef DEFAULT_TRIM_THRESHOLD
-#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
-#endif
-
-/*
- M_TOP_PAD is the amount of extra `padding' space to allocate or
- retain whenever sbrk is called. It is used in two ways internally:
-
- * When sbrk is called to extend the top of the arena to satisfy
- a new malloc request, this much padding is added to the sbrk
- request.
-
- * When malloc_trim is called automatically from free(),
- it is used as the `pad' argument.
-
- In both cases, the actual amount of padding is rounded
- so that the end of the arena is always a system page boundary.
-
- The main reason for using padding is to avoid calling sbrk so
- often. Having even a small pad greatly reduces the likelihood
- that nearly every malloc request during program start-up (or
- after trimming) will invoke sbrk, which needlessly wastes
- time.
-
- Automatic rounding-up to page-size units is normally sufficient
- to avoid measurable overhead, so the default is 0. However, in
- systems where sbrk is relatively slow, it can pay to increase
- this value, at the expense of carrying around more memory than
- the program needs.
-*/
-
-#define M_TOP_PAD -2
-
-#ifndef DEFAULT_TOP_PAD
-#define DEFAULT_TOP_PAD (0)
-#endif
-
-/*
- M_MMAP_THRESHOLD is the request size threshold for using mmap()
- to service a request. Requests of at least this size that cannot
- be allocated using already-existing space will be serviced via mmap.
- (If enough normal freed space already exists it is used instead.)
-
- Using mmap segregates relatively large chunks of memory so that
- they can be individually obtained and released from the host
- system. A request serviced through mmap is never reused by any
- other request (at least not directly; the system may just so
- happen to remap successive requests to the same locations).
-
- Segregating space in this way has the benefits that:
-
- 1. Mmapped space can ALWAYS be individually released back
- to the system, which helps keep the system level memory
- demands of a long-lived program low.
- 2. Mapped memory can never become `locked' between
- other chunks, as can happen with normally allocated chunks, which
- means that even trimming via malloc_trim would not release them.
- 3. On some systems with "holes" in address spaces, mmap can obtain
- memory that sbrk cannot.
-
- However, it has the disadvantages that:
-
- 1. The space cannot be reclaimed, consolidated, and then
- used to service later requests, as happens with normal chunks.
- 2. It can lead to more wastage because of mmap page alignment
- requirements.
- 3. It causes malloc performance to be more dependent on host
- system memory management support routines which may vary in
- implementation quality and may impose arbitrary
- limitations. Generally, servicing a request via normal
- malloc steps is faster than going through a system's mmap.
-
- The advantages of mmap nearly always outweigh disadvantages for
- "large" chunks, but the value of "large" varies across systems. The
- default is an empirically derived value that works well in most
- systems.
-*/
-
-#define M_MMAP_THRESHOLD -3
-
-#ifndef DEFAULT_MMAP_THRESHOLD
-#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
-#endif
-
-/*
- M_MMAP_MAX is the maximum number of requests to simultaneously
- service using mmap. This parameter exists because some systems
- have a limited number of internal tables for use by mmap, and
- using more than a few of them may degrade performance.
-
- The default is set to a value that serves only as a safeguard.
- Setting to 0 disables use of mmap for servicing large requests. If
- HAVE_MMAP is not set, the default value is 0, and attempts to set it
- to non-zero values in mallopt will fail.
-*/
-
-#define M_MMAP_MAX -4
-
-#ifndef DEFAULT_MMAP_MAX
-#if HAVE_MMAP
-#define DEFAULT_MMAP_MAX (65536)
-#else
-#define DEFAULT_MMAP_MAX (0)
-#endif
-#endif
-
-#ifdef __cplusplus
-}; /* end of extern "C" */
-#endif
-
-/*
- ========================================================================
- To make a fully customizable malloc.h header file, cut everything
- above this line, put into file malloc.h, edit to suit, and #include it
- on the next line, as well as in programs that use this malloc.
- ========================================================================
-*/
-
-/* #include "malloc.h" */
-
-/* --------------------- public wrappers ---------------------- */
-
-#ifdef USE_PUBLIC_MALLOC_WRAPPERS
-
-/* Declare all routines as internal */
-#if __STD_C
-static Void_t* mALLOc(size_t);
-static void fREe(Void_t*);
-static Void_t* rEALLOc(Void_t*, size_t);
-static Void_t* mEMALIGn(size_t, size_t);
-static Void_t* vALLOc(size_t);
-static Void_t* pVALLOc(size_t);
-static Void_t* cALLOc(size_t, size_t);
-static Void_t** iCALLOc(size_t, size_t, Void_t**);
-static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
-static void cFREe(Void_t*);
-static int mTRIm(size_t);
-static size_t mUSABLe(Void_t*);
-static void mSTATs();
-static int mALLOPt(int, int);
-static struct mallinfo mALLINFo(void);
-#else
-static Void_t* mALLOc();
-static void fREe();
-static Void_t* rEALLOc();
-static Void_t* mEMALIGn();
-static Void_t* vALLOc();
-static Void_t* pVALLOc();
-static Void_t* cALLOc();
-static Void_t** iCALLOc();
-static Void_t** iCOMALLOc();
-static void cFREe();
-static int mTRIm();
-static size_t mUSABLe();
-static void mSTATs();
-static int mALLOPt();
-static struct mallinfo mALLINFo();
-#endif
-
-/*
- MALLOC_PREACTION and MALLOC_POSTACTION should be
- defined to return 0 on success, and nonzero on failure.
- The return value of MALLOC_POSTACTION is currently ignored
- in wrapper functions since there is no reasonable default
- action to take on failure.
-*/
-
-
-#ifdef USE_MALLOC_LOCK
-
-#ifdef WIN32
-
-static int mALLOC_MUTEx;
-#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
-#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
-
-#else
-
-#include <pthread.h>
-
-static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
-
-#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)
-#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)
-
-#endif /* WIN32 */
-
-#else
-
-/* Substitute anything you like for these */
-
-#define MALLOC_PREACTION (0)
-#define MALLOC_POSTACTION (0)
-
-#endif /* USE_MALLOC_LOCK */
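The wrappers that follow rely only on the contract described above:
MALLOC_PREACTION returns 0 on success and nonzero on failure, and the result
of MALLOC_POSTACTION is ignored. A standalone sketch (not part of the
original malloc.c) of that contract, using a hypothetical re-entrancy guard
in place of a real lock and a stubbed-out internal allocator:

#include <assert.h>
#include <stddef.h>

static volatile int malloc_busy;   /* hypothetical guard, not a real lock */

#define MALLOC_PREACTION  (malloc_busy ? 1 : (malloc_busy = 1, 0))
#define MALLOC_POSTACTION (malloc_busy = 0)

/* Stand-in for the internal allocator (mALLOc in this file). */
static void *internal_malloc (size_t n) { (void) n; return 0; }

/* Same shape as public_mALLOc below. */
static void *wrapped_malloc (size_t bytes)
{
  void *m;
  if (MALLOC_PREACTION != 0)
    return 0;                      /* pre-action failed: give up */
  m = internal_malloc (bytes);
  if (MALLOC_POSTACTION != 0) {
    /* return value deliberately ignored, as in the wrappers below */
  }
  return m;
}

int main (void)
{
  (void) wrapped_malloc (32);
  assert (malloc_busy == 0);       /* post-action cleared the guard */
  return 0;
}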
-
-Void_t* public_mALLOc(size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = mALLOc(bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-void public_fREe(Void_t* m) {
- if (MALLOC_PREACTION != 0) {
- return;
- }
- fREe(m);
- if (MALLOC_POSTACTION != 0) {
- }
-}
-
-Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = rEALLOc(m, bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = mEMALIGn(alignment, bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_vALLOc(size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = vALLOc(bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_pVALLOc(size_t bytes) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = pVALLOc(bytes);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t* public_cALLOc(size_t n, size_t elem_size) {
- Void_t* m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = cALLOc(n, elem_size);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-
-Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
- Void_t** m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = iCALLOc(n, elem_size, chunks);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
- Void_t** m;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- m = iCOMALLOc(n, sizes, chunks);
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-void public_cFREe(Void_t* m) {
- if (MALLOC_PREACTION != 0) {
- return;
- }
- cFREe(m);
- if (MALLOC_POSTACTION != 0) {
- }
-}
-
-int public_mTRIm(size_t s) {
- int result;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- result = mTRIm(s);
- if (MALLOC_POSTACTION != 0) {
- }
- return result;
-}
-
-size_t public_mUSABLe(Void_t* m) {
- size_t result;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- result = mUSABLe(m);
- if (MALLOC_POSTACTION != 0) {
- }
- return result;
-}
-
-void public_mSTATs() {
- if (MALLOC_PREACTION != 0) {
- return;
- }
- mSTATs();
- if (MALLOC_POSTACTION != 0) {
- }
-}
-
-struct mallinfo public_mALLINFo() {
- struct mallinfo m;
- if (MALLOC_PREACTION != 0) {
- struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- return nm;
- }
- m = mALLINFo();
- if (MALLOC_POSTACTION != 0) {
- }
- return m;
-}
-
-int public_mALLOPt(int p, int v) {
- int result;
- if (MALLOC_PREACTION != 0) {
- return 0;
- }
- result = mALLOPt(p, v);
- if (MALLOC_POSTACTION != 0) {
- }
- return result;
-}
-
-#endif
-
-
-
-/* ------------- Optional versions of memcopy ---------------- */
-
-
-#if USE_MEMCPY
-
-/*
- Note: memcpy is ONLY invoked with non-overlapping regions,
- so the (usually slower) memmove is not needed.
-*/
-
-#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
-#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
-
-#else /* !USE_MEMCPY */
-
-/* Use Duff's device for good zeroing/copying performance. */
-
-#define MALLOC_ZERO(charp, nbytes) \
-do { \
- INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
- CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
- long mcn; \
- if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
- switch (mctmp) { \
- case 0: for(;;) { *mzp++ = 0; \
- case 7: *mzp++ = 0; \
- case 6: *mzp++ = 0; \
- case 5: *mzp++ = 0; \
- case 4: *mzp++ = 0; \
- case 3: *mzp++ = 0; \
- case 2: *mzp++ = 0; \
- case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
- } \
-} while(0)
-
-#define MALLOC_COPY(dest,src,nbytes) \
-do { \
- INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
- INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
- CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
- long mcn; \
- if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
- switch (mctmp) { \
- case 0: for(;;) { *mcdst++ = *mcsrc++; \
- case 7: *mcdst++ = *mcsrc++; \
- case 6: *mcdst++ = *mcsrc++; \
- case 5: *mcdst++ = *mcsrc++; \
- case 4: *mcdst++ = *mcsrc++; \
- case 3: *mcdst++ = *mcsrc++; \
- case 2: *mcdst++ = *mcsrc++; \
- case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
- } \
-} while(0)
-
-#endif
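A standalone sketch (not part of the original malloc.c) of the same 8-way
unrolled loop, applied to zeroing a small array of size_t words, so the
control flow of MALLOC_ZERO above can be followed with a concrete length:

#include <assert.h>
#include <stddef.h>

int main (void)
{
  size_t buf[19];                       /* 19 = 2 groups of 8 plus 3 */
  size_t *p = buf;
  size_t n = sizeof buf / sizeof buf[0];
  size_t rem = n;
  long groups;
  size_t i;

  for (i = 0; i < n; ++i)
    buf[i] = (size_t) -1;               /* pre-fill with garbage */

  if (rem < 8) groups = 0; else { groups = (rem - 1) / 8; rem %= 8; }

  /* Jump into the middle of the unrolled loop to handle the n % 8
     leftover words, then fall through whole groups of 8. */
  switch (rem) {
    case 0: for (;;) { *p++ = 0;
    case 7:            *p++ = 0;
    case 6:            *p++ = 0;
    case 5:            *p++ = 0;
    case 4:            *p++ = 0;
    case 3:            *p++ = 0;
    case 2:            *p++ = 0;
    case 1:            *p++ = 0; if (groups <= 0) break; groups--; }
  }

  for (i = 0; i < n; ++i)
    assert (buf[i] == 0);
  return 0;
}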
-
-/* ------------------ MMAP support ------------------ */
-
-
-#if HAVE_MMAP
-
-#ifndef LACKS_FCNTL_H
-#include <fcntl.h>
-#endif
-
-#ifndef LACKS_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-/*
- Nearly all versions of mmap support MAP_ANONYMOUS,
- so the following is unlikely to be needed, but is
- supplied just in case.
-*/
-
-#ifndef MAP_ANONYMOUS
-
-static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
-
-#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
- (dev_zero_fd = open("/dev/zero", O_RDWR), \
- mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
- mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
-
-#else
-
-#define MMAP(addr, size, prot, flags) \
- (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
-
-#endif
-
-
-#endif /* HAVE_MMAP */
-
-
-/*
- ----------------------- Chunk representations -----------------------
-*/
-
-
-/*
- This struct declaration is misleading (but accurate and necessary).
- It declares a "view" into memory allowing access to necessary
- fields at known offsets from a given base. See explanation below.
-*/
-
-struct malloc_chunk {
-
- INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
- INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
-
- struct malloc_chunk* fd; /* double links -- used only if free. */
- struct malloc_chunk* bk;
-};
-
-
-typedef struct malloc_chunk* mchunkptr;
-
-/*
- malloc_chunk details:
-
- (The following includes lightly edited explanations by Colin Plumb.)
-
- Chunks of memory are maintained using a `boundary tag' method as
- described in e.g., Knuth or Standish. (See the paper by Paul
- Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
- survey of such techniques.) Sizes of free chunks are stored both
- in the front of each chunk and at the end. This makes
- consolidating fragmented chunks into bigger chunks very fast. The
- size fields also hold bits representing whether chunks are free or
- in use.
-
- An allocated chunk looks like this:
-
-
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if allocated | |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |P|
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | User data starts here... .
- . .
- . (malloc_usable_space() bytes) .
- . |
-nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
- Where "chunk" is the front of the chunk for the purpose of most of
- the malloc code, but "mem" is the pointer that is returned to the
- user. "Nextchunk" is the beginning of the next contiguous chunk.
-
- Chunks always begin on even word boundaries, so the mem portion
- (which is returned to the user) is also on an even word boundary, and
- thus at least double-word aligned.
-
- Free chunks are stored in circular doubly-linked lists, and look like this:
-
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `head:' | Size of chunk, in bytes |P|
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Forward pointer to next chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Back pointer to previous chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Unused space (may be 0 bytes long) .
- . .
- . |
-nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `foot:' | Size of chunk, in bytes |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
- The P (PREV_INUSE) bit, stored in the unused low-order bit of the
- chunk size (which is always a multiple of two words), is an in-use
- bit for the *previous* chunk. If that bit is *clear*, then the
- word before the current chunk size contains the previous chunk
- size, and can be used to find the front of the previous chunk.
- The very first chunk allocated always has this bit set,
- preventing access to non-existent (or non-owned) memory. If
- prev_inuse is set for any given chunk, then you CANNOT determine
- the size of the previous chunk, and might even get a memory
- addressing fault when trying to do so.
-
- Note that the `foot' of the current chunk is actually represented
- as the prev_size of the NEXT chunk. This makes it easier to
- deal with alignments etc but can be very confusing when trying
- to extend or adapt this code.
-
- The two exceptions to all this are
-
- 1. The special chunk `top' doesn't bother using the
- trailing size field since there is no next contiguous chunk
- that would have to index off it. After initialization, `top'
- is forced to always exist. If it would become less than
- MINSIZE bytes long, it is replenished.
-
- 2. Chunks allocated via mmap, which have the second-lowest-order
- bit (IS_MMAPPED) set in their size fields. Because they are
- allocated one-by-one, each must contain its own trailing size field.
-
-*/
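A standalone sketch (not part of the original malloc.c) of the flag packing
described above, using plain size_t arithmetic: because chunk sizes are
always multiples of two words, the two low-order bits of the size field are
free to carry the PREV_INUSE and IS_MMAPPED flags:

#include <assert.h>
#include <stddef.h>

int main (void)
{
  const size_t prev_inuse_bit = 0x1;
  const size_t is_mmapped_bit = 0x2;

  /* A 48-byte chunk whose predecessor is in use. */
  size_t size_field = 48 | prev_inuse_bit;

  size_t size = size_field & ~(prev_inuse_bit | is_mmapped_bit);
  assert (size == 48);                          /* flags masked off     */
  assert ((size_field & prev_inuse_bit) != 0);  /* previous chunk used  */
  assert ((size_field & is_mmapped_bit) == 0);  /* not an mmapped chunk */
  return 0;
}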
-
-/*
- ---------- Size and alignment checks and conversions ----------
-*/
-
-/* conversion from malloc headers to user pointers, and back */
-
-#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
-#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
-
-/* The smallest possible chunk */
-#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
-
-/* The smallest size we can malloc is an aligned minimal chunk */
-
-#define MINSIZE \
- (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
-
-/* Check if m has acceptable alignment */
-
-#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
-
-
-/*
- Check if a request is so large that it would wrap around zero when
- padded and aligned. To simplify some other code, the bound is made
- low enough so that adding MINSIZE will also not wrap around zero.
-*/
-
-#define REQUEST_OUT_OF_RANGE(req) \
- ((CHUNK_SIZE_T)(req) >= \
- (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE))
-
-/* pad request bytes into a usable size -- internal version */
-
-#define request2size(req) \
- (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
- MINSIZE : \
- ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
-
-/* Same, except also perform argument check */
-
-#define checked_request2size(req, sz) \
- if (REQUEST_OUT_OF_RANGE(req)) { \
- MALLOC_FAILURE_ACTION; \
- return 0; \
- } \
- (sz) = request2size(req);
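A standalone sketch (not part of the original malloc.c) reproducing the
request2size arithmetic above for a hypothetical 32-bit configuration
(SIZE_SZ == 4, 8-byte alignment, MINSIZE == 16), to show how user requests
round up to chunk sizes:

#include <stdio.h>
#include <stddef.h>

int main (void)
{
  const size_t size_sz = 4, align_mask = 7, minsize = 16;
  size_t req;

  for (req = 0; req <= 40; req += 10)
    {
      size_t sz = (req + size_sz + align_mask < minsize)
                    ? minsize
                    : ((req + size_sz + align_mask) & ~align_mask);
      printf ("request of %2lu bytes -> chunk of %2lu bytes\n",
              (unsigned long) req, (unsigned long) sz);
    }
  return 0;
}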
-
-/*
- --------------- Physical chunk operations ---------------
-*/
-
-
-/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
-#define PREV_INUSE 0x1
-
-/* extract inuse bit of previous chunk */
-#define prev_inuse(p) ((p)->size & PREV_INUSE)
-
-
-/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
-#define IS_MMAPPED 0x2
-
-/* check for mmap()'ed chunk */
-#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
-
-/*
- Bits to mask off when extracting size
-
- Note: IS_MMAPPED is intentionally not masked off from size field in
- macros for which mmapped chunks should never be seen. This should
- cause helpful core dumps to occur if it is tried by accident by
- people extending or adapting this malloc.
-*/
-#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
-
-/* Get size, ignoring use bits */
-#define chunksize(p) ((p)->size & ~(SIZE_BITS))
-
-
-/* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
-
-/* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
-
-/* Treat space at ptr + offset as a chunk */
-#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
-
-/* extract p's inuse bit */
-#define inuse(p)\
-((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
-
-/* set/clear chunk as being inuse without otherwise disturbing */
-#define set_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
-
-#define clear_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
-
-
-/* check/set/clear inuse bits in known places */
-#define inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
-
-#define set_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
-
-#define clear_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
-
-
-/* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
-
-/* Set size/use field */
-#define set_head(p, s) ((p)->size = (s))
-
-/* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
-
-
-/*
- -------------------- Internal data structures --------------------
-
- All internal state is held in an instance of malloc_state defined
- below. There are no other static variables, except in two optional
- cases:
- * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
- * If HAVE_MMAP is true, but mmap doesn't support
- MAP_ANONYMOUS, a dummy file descriptor for mmap.
-
- Beware of lots of tricks that minimize the total bookkeeping space
- requirements. The result is a little over 1K bytes (for 4byte
- pointers and size_t.)
-*/
-
-/*
- Bins
-
- An array of bin headers for free chunks. Each bin is doubly
- linked. The bins are approximately proportionally (log) spaced.
- There are a lot of these bins (96). This may look excessive, but
- works very well in practice. Most bins hold sizes that are
- unusual as malloc request sizes, but are more usual for fragments
- and consolidated sets of chunks, which is what these bins hold, so
- they can be found quickly. All procedures maintain the invariant
- that no consolidated chunk physically borders another one, so each
- chunk in a list is known to be preceded and followed by either
- inuse chunks or the ends of memory.
-
- Chunks in bins are kept in size order, with ties going to the
- approximately least recently used chunk. Ordering isn't needed
- for the small bins, which all contain the same-sized chunks, but
- facilitates best-fit allocation for larger chunks. These lists
- are just sequential. Keeping them in order almost never requires
- enough traversal to warrant using fancier ordered data
- structures.
-
- Chunks of the same size are linked with the most
- recently freed at the front, and allocations are taken from the
- back. This results in LRU (FIFO) allocation order, which tends
- to give each chunk an equal opportunity to be consolidated with
- adjacent freed chunks, resulting in larger free chunks and less
- fragmentation.
-
- To simplify use in double-linked lists, each bin header acts
- as a malloc_chunk. This avoids special-casing for headers.
- But to conserve space and improve locality, we allocate
- only the fd/bk pointers of bins, and then use repositioning tricks
- to treat these as the fields of a malloc_chunk*.
-*/
-
-typedef struct malloc_chunk* mbinptr;
-
-/* addressing -- note that bin_at(0) does not exist */
-#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
-
-/* analog of ++bin */
-#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
-
-/* Reminders about list directionality within bins */
-#define first(b) ((b)->fd)
-#define last(b) ((b)->bk)
-
-/* Take a chunk off a bin list */
-#define unlink(P, BK, FD) { \
- FD = P->fd; \
- BK = P->bk; \
- FD->bk = BK; \
- BK->fd = FD; \
-}
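A standalone sketch (not part of the original malloc.c) of what unlink()
does, using a hypothetical node type with the same fd/bk field names:
removing a node from a circular doubly-linked list splices its neighbours
together without touching the node itself:

#include <assert.h>

struct node { struct node *fd, *bk; };

int main (void)
{
  struct node head, a, b;
  struct node *FD, *BK;

  /* Circular list: head <-> a <-> b <-> head. */
  head.fd = &a;  a.fd = &b;  b.fd = &head;
  head.bk = &b;  b.bk = &a;  a.bk = &head;

  /* Unlink a, exactly as the macro does. */
  FD = a.fd;
  BK = a.bk;
  FD->bk = BK;
  BK->fd = FD;

  assert (head.fd == &b && b.bk == &head);  /* a is no longer reachable */
  return 0;
}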
-
-/*
- Indexing
-
- Bins for sizes < 512 bytes contain chunks of all the same size, spaced
- 8 bytes apart. Larger bins are approximately logarithmically spaced:
-
- 64 bins of size 8
- 32 bins of size 64
- 16 bins of size 512
- 8 bins of size 4096
- 4 bins of size 32768
- 2 bins of size 262144
- 1 bin of size what's left
-
- The bins top out around 1MB because we expect to service large
- requests via mmap.
-*/
-
-#define NBINS 96
-#define NSMALLBINS 32
-#define SMALLBIN_WIDTH 8
-#define MIN_LARGE_SIZE 256
-
-#define in_smallbin_range(sz) \
- ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
-
-#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
-
-/*
- Compute index for size. We expect this to be inlined when
- compiled with optimization, else not, which works out well.
-*/
-static int largebin_index(unsigned int sz) {
- unsigned int x = sz >> SMALLBIN_WIDTH;
- unsigned int m; /* bit position of highest set bit of x */
-
- if (x >= 0x10000) return NBINS-1;
-
- /* On intel, use BSRL instruction to find highest bit */
-#if defined(__GNUC__) && defined(i386)
-
- __asm__("bsrl %1,%0\n\t"
- : "=r" (m)
- : "g" (x));
-
-#else
- {
- /*
- Based on branch-free nlz algorithm in chapter 5 of Henry
- S. Warren Jr's book "Hacker's Delight".
- */
-
- unsigned int n = ((x - 0x100) >> 16) & 8;
- x <<= n;
- m = ((x - 0x1000) >> 16) & 4;
- n += m;
- x <<= m;
- m = ((x - 0x4000) >> 16) & 2;
- n += m;
- x = (x << m) >> 14;
- m = 13 - n + (x & ~(x>>1));
- }
-#endif
-
- /* Use next 2 bits to create finer-granularity bins */
- return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
-}
-
-#define bin_index(sz) \
- ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
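A standalone sketch (not part of the original malloc.c) of the small-bin half
of the indexing above: chunks below MIN_LARGE_SIZE fall into bins spaced
8 bytes apart, so the bin index is simply the chunk size shifted right by 3:

#include <stdio.h>

int main (void)
{
  unsigned int sizes[] = { 16, 24, 32, 128, 248 };
  unsigned int i;

  for (i = 0; i < sizeof sizes / sizeof sizes[0]; ++i)
    printf ("chunk of %3u bytes -> small bin %u\n",
            sizes[i], sizes[i] >> 3);
  return 0;
}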
-
-/*
- FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
- first bin that is maintained in sorted order. This must
- be the smallest size corresponding to a given bin.
-
- Normally, this should be MIN_LARGE_SIZE. But you can weaken
- best fit guarantees to sometimes speed up malloc by increasing
- this value. Doing this means that malloc may choose a chunk that is
- non-best-fitting by up to the width of the bin.
-
- Some useful cutoff values:
- 512 - all bins sorted
- 2560 - leaves bins <= 64 bytes wide unsorted
- 12288 - leaves bins <= 512 bytes wide unsorted
- 65536 - leaves bins <= 4096 bytes wide unsorted
- 262144 - leaves bins <= 32768 bytes wide unsorted
- -1 - no bins sorted (not recommended!)
-*/
-
-#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
-/* #define FIRST_SORTED_BIN_SIZE 65536 */
-
-/*
- Unsorted chunks
-
- All remainders from chunk splits, as well as all returned chunks,
- are first placed in the "unsorted" bin. They are then placed
- in regular bins after malloc gives them ONE chance to be used before
- binning. So, basically, the unsorted_chunks list acts as a queue,
- with chunks being placed on it in free (and malloc_consolidate),
- and taken off (to be either used or placed in bins) in malloc.
-*/
-
-/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
-#define unsorted_chunks(M) (bin_at(M, 1))
-
-/*
- Top
-
- The top-most available chunk (i.e., the one bordering the end of
- available memory) is treated specially. It is never included in
- any bin, is used only if no other chunk is available, and is
- released back to the system if it is very large (see
- M_TRIM_THRESHOLD). Because top initially
- points to its own bin with initial zero size, thus forcing
- extension on the first malloc request, we avoid having any special
- code in malloc to check whether it even exists yet. But we still
- need to do so when getting memory from system, so we make
- initial_top treat the bin as a legal but unusable chunk during the
- interval between initialization and the first call to
- sYSMALLOc. (This is somewhat delicate, since it relies on
- the 2 preceding words to be zero during this interval as well.)
-*/
-
-/* Conveniently, the unsorted bin can be used as dummy top on first call */
-#define initial_top(M) (unsorted_chunks(M))
-
-/*
- Binmap
-
- To help compensate for the large number of bins, a one-level index
- structure is used for bin-by-bin searching. `binmap' is a
- bitvector recording whether bins are definitely empty so they can
- be skipped over during traversals. The bits are NOT always
- cleared as soon as bins are empty, but instead only
- when they are noticed to be empty during traversal in malloc.
-*/
-
-/* Conservatively use 32 bits per map word, even if on 64bit system */
-#define BINMAPSHIFT 5
-#define BITSPERMAP (1U << BINMAPSHIFT)
-#define BINMAPSIZE (NBINS / BITSPERMAP)
-
-#define idx2block(i) ((i) >> BINMAPSHIFT)
-#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
-
-#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
-#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
-#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
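A standalone sketch (not part of the original malloc.c) of the binmap
addressing above: with 32-bit map words, bin i lives in word i >> 5 at bit
i & 31, which is all that mark_bin/unmark_bin/get_binmap do:

#include <assert.h>

int main (void)
{
  unsigned int binmap[3] = { 0, 0, 0 };  /* 96 bins / 32 bits per word */
  unsigned int i = 37;                   /* mark bin 37 as possibly non-empty */

  binmap[i >> 5] |= 1U << (i & 31);
  assert (binmap[1] == (1U << 5));       /* bin 37 -> word 1, bit 5 */

  binmap[i >> 5] &= ~(1U << (i & 31));   /* noticed empty: clear it again */
  assert (binmap[1] == 0);
  return 0;
}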
-
-/*
- Fastbins
-
- An array of lists holding recently freed small chunks. Fastbins
- are not doubly linked. It is faster to single-link them, and
- since chunks are never removed from the middles of these lists,
- double linking is not necessary. Also, unlike regular bins, they
- are not even processed in FIFO order (they use faster LIFO) since
- ordering doesn't much matter in the transient contexts in which
- fastbins are normally used.
-
- Chunks in fastbins keep their inuse bit set, so they cannot
- be consolidated with other free chunks. malloc_consolidate
- releases all chunks in fastbins and consolidates them with
- other free chunks.
-*/
-
-typedef struct malloc_chunk* mfastbinptr;
-
-/* offset 2 to use otherwise unindexable first 2 bins */
-#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
-
-/* The maximum fastbin request size we support */
-#define MAX_FAST_SIZE 80
-
-#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
-
-/*
- FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
- that triggers automatic consolidation of possibly-surrounding
- fastbin chunks. This is a heuristic, so the exact value should not
- matter too much. It is defined at half the default trim threshold as a
- compromise heuristic to only attempt consolidation if it is likely
- to lead to trimming. However, it is not dynamically tunable, since
- consolidation reduces fragmentation surrounding large chunks even
- if trimming is not used.
-*/
-
-#define FASTBIN_CONSOLIDATION_THRESHOLD \
- ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
-
-/*
- Since the lowest 2 bits in max_fast don't matter in size comparisons,
- they are used as flags.
-*/
-
-/*
- ANYCHUNKS_BIT held in max_fast indicates that there may be any
- freed chunks at all. It is set true when entering a chunk into any
- bin.
-*/
-
-#define ANYCHUNKS_BIT (1U)
-
-#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
-#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
-#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
-
-/*
- FASTCHUNKS_BIT held in max_fast indicates that there are probably
- some fastbin chunks. It is set true on entering a chunk into any
- fastbin, and cleared only in malloc_consolidate.
-*/
-
-#define FASTCHUNKS_BIT (2U)
-
-#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
-#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
-#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
-
-/*
- Set value of max_fast.
- Use impossibly small value if 0.
-*/
-
-#define set_max_fast(M, s) \
- (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
- ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
-
-#define get_max_fast(M) \
- ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
-
-
-/*
- morecore_properties is a status word holding dynamically discovered
- or controlled properties of the morecore function
-*/
-
-#define MORECORE_CONTIGUOUS_BIT (1U)
-
-#define contiguous(M) \
- (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
-#define noncontiguous(M) \
- (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
-#define set_contiguous(M) \
- ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
-#define set_noncontiguous(M) \
- ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
-
-
-/*
- ----------- Internal state representation and initialization -----------
-*/
-
-struct malloc_state {
-
- /* The maximum chunk size to be eligible for fastbin */
- INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
-
- /* Fastbins */
- mfastbinptr fastbins[NFASTBINS];
-
- /* Base of the topmost chunk -- not otherwise kept in a bin */
- mchunkptr top;
-
- /* The remainder from the most recent split of a small request */
- mchunkptr last_remainder;
-
- /* Normal bins packed as described above */
- mchunkptr bins[NBINS * 2];
-
- /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
- unsigned int binmap[BINMAPSIZE+1];
-
- /* Tunable parameters */
- CHUNK_SIZE_T trim_threshold;
- INTERNAL_SIZE_T top_pad;
- INTERNAL_SIZE_T mmap_threshold;
-
- /* Memory map support */
- int n_mmaps;
- int n_mmaps_max;
- int max_n_mmaps;
-
- /* Cache malloc_getpagesize */
- unsigned int pagesize;
-
- /* Track properties of MORECORE */
- unsigned int morecore_properties;
-
- /* Statistics */
- INTERNAL_SIZE_T mmapped_mem;
- INTERNAL_SIZE_T sbrked_mem;
- INTERNAL_SIZE_T max_sbrked_mem;
- INTERNAL_SIZE_T max_mmapped_mem;
- INTERNAL_SIZE_T max_total_mem;
-};
-
-typedef struct malloc_state *mstate;
-
-/*
- There is exactly one instance of this struct in this malloc.
- If you are adapting this malloc in a way that does NOT use a static
- malloc_state, you MUST explicitly zero-fill it before using. This
- malloc relies on the property that malloc_state is initialized to
- all zeroes (as is true of C statics).
-*/
-
-static struct malloc_state av_; /* never directly referenced */
-
-/*
- All uses of av_ are via get_malloc_state().
- At most one "call" to get_malloc_state is made per invocation of
- the public versions of malloc and free, but other routines
- that in turn invoke malloc and/or free may call it more than once.
- Also, it is called in check* routines if DEBUG is set.
-*/
-
-#define get_malloc_state() (&(av_))
-
-/*
- Initialize a malloc_state struct.
-
- This is called only from within malloc_consolidate, which needs to
- be called in the same contexts anyway. It is never called directly
- outside of malloc_consolidate because some optimizing compilers try
- to inline it at all call points, which turns out not to be an
- optimization at all. (Inlining it in malloc_consolidate is fine though.)
-*/
-
-#if __STD_C
-static void malloc_init_state(mstate av)
-#else
-static void malloc_init_state(av) mstate av;
-#endif
-{
- int i;
- mbinptr bin;
-
- /* Establish circular links for normal bins */
- for (i = 1; i < NBINS; ++i) {
- bin = bin_at(av,i);
- bin->fd = bin->bk = bin;
- }
-
- av->top_pad = DEFAULT_TOP_PAD;
- av->n_mmaps_max = DEFAULT_MMAP_MAX;
- av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
- av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
-
-#if MORECORE_CONTIGUOUS
- set_contiguous(av);
-#else
- set_noncontiguous(av);
-#endif
-
-
- set_max_fast(av, DEFAULT_MXFAST);
-
- av->top = initial_top(av);
- av->pagesize = malloc_getpagesize;
-}
-
-/*
- Other internal utilities operating on mstates
-*/
-
-#if __STD_C
-static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
-static int sYSTRIm(size_t, mstate);
-static void malloc_consolidate(mstate);
-static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
-#else
-static Void_t* sYSMALLOc();
-static int sYSTRIm();
-static void malloc_consolidate();
-static Void_t** iALLOc();
-#endif
-
-/*
- Debugging support
-
- These routines make a number of assertions about the states
- of data structures that should be true at all times. If any
- are not true, it's very likely that a user program has somehow
- trashed memory. (It's also possible that there is a coding error
- in malloc, in which case please report it!)
-*/
-
-#if ! DEBUG
-
-#define check_chunk(P)
-#define check_free_chunk(P)
-#define check_inuse_chunk(P)
-#define check_remalloced_chunk(P,N)
-#define check_malloced_chunk(P,N)
-#define check_malloc_state()
-
-#else
-#define check_chunk(P) do_check_chunk(P)
-#define check_free_chunk(P) do_check_free_chunk(P)
-#define check_inuse_chunk(P) do_check_inuse_chunk(P)
-#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
-#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
-#define check_malloc_state() do_check_malloc_state()
-
-/*
- Properties of all chunks
-*/
-
-#if __STD_C
-static void do_check_chunk(mchunkptr p)
-#else
-static void do_check_chunk(p) mchunkptr p;
-#endif
-{
- mstate av = get_malloc_state();
- CHUNK_SIZE_T sz = chunksize(p);
- /* min and max possible addresses assuming contiguous allocation */
- char* max_address = (char*)(av->top) + chunksize(av->top);
- char* min_address = max_address - av->sbrked_mem;
-
- if (!chunk_is_mmapped(p)) {
-
- /* Has legal address ... */
- if (p != av->top) {
- if (contiguous(av)) {
- assert(((char*)p) >= min_address);
- assert(((char*)p + sz) <= ((char*)(av->top)));
- }
- }
- else {
- /* top size is always at least MINSIZE */
- assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
- /* top predecessor always marked inuse */
- assert(prev_inuse(p));
- }
-
- }
- else {
-#if HAVE_MMAP
- /* address is outside main heap */
- if (contiguous(av) && av->top != initial_top(av)) {
- assert(((char*)p) < min_address || ((char*)p) > max_address);
- }
- /* chunk is page-aligned */
- assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
- /* mem is aligned */
- assert(aligned_OK(chunk2mem(p)));
-#else
- /* force an appropriate assert violation if debug set */
- assert(!chunk_is_mmapped(p));
-#endif
- }
-}
-
-/*
- Properties of free chunks
-*/
-
-#if __STD_C
-static void do_check_free_chunk(mchunkptr p)
-#else
-static void do_check_free_chunk(p) mchunkptr p;
-#endif
-{
- mstate av = get_malloc_state();
-
- INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
- mchunkptr next = chunk_at_offset(p, sz);
-
- do_check_chunk(p);
-
- /* Chunk must claim to be free ... */
- assert(!inuse(p));
- assert (!chunk_is_mmapped(p));
-
- /* Unless a special marker, must have OK fields */
- if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
- {
- assert((sz & MALLOC_ALIGN_MASK) == 0);
- assert(aligned_OK(chunk2mem(p)));
- /* ... matching footer field */
- assert(next->prev_size == sz);
- /* ... and is fully consolidated */
- assert(prev_inuse(p));
- assert (next == av->top || inuse(next));
-
- /* ... and has minimally sane links */
- assert(p->fd->bk == p);
- assert(p->bk->fd == p);
- }
- else /* markers are always of size SIZE_SZ */
- assert(sz == SIZE_SZ);
-}
-
-/*
- Properties of inuse chunks
-*/
-
-#if __STD_C
-static void do_check_inuse_chunk(mchunkptr p)
-#else
-static void do_check_inuse_chunk(p) mchunkptr p;
-#endif
-{
- mstate av = get_malloc_state();
- mchunkptr next;
- do_check_chunk(p);
-
- if (chunk_is_mmapped(p))
- return; /* mmapped chunks have no next/prev */
-
- /* Check whether it claims to be in use ... */
- assert(inuse(p));
-
- next = next_chunk(p);
-
- /* ... and is surrounded by OK chunks.
- Since more things can be checked with free chunks than inuse ones,
- if an inuse chunk borders them and debug is on, it's worth doing them.
- */
- if (!prev_inuse(p)) {
- /* Note that we cannot even look at prev unless it is not inuse */
- mchunkptr prv = prev_chunk(p);
- assert(next_chunk(prv) == p);
- do_check_free_chunk(prv);
- }
-
- if (next == av->top) {
- assert(prev_inuse(next));
- assert(chunksize(next) >= MINSIZE);
- }
- else if (!inuse(next))
- do_check_free_chunk(next);
-}
-
-/*
- Properties of chunks recycled from fastbins
-*/
-
-#if __STD_C
-static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
-#else
-static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
-#endif
-{
- INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
-
- do_check_inuse_chunk(p);
-
- /* Legal size ... */
- assert((sz & MALLOC_ALIGN_MASK) == 0);
- assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
- /* ... and alignment */
- assert(aligned_OK(chunk2mem(p)));
- /* chunk is less than MINSIZE more than request */
- assert((long)(sz) - (long)(s) >= 0);
- assert((long)(sz) - (long)(s + MINSIZE) < 0);
-}
-
-/*
- Properties of nonrecycled chunks at the point they are malloced
-*/
-
-#if __STD_C
-static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
-#else
-static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
-#endif
-{
- /* same as recycled case ... */
- do_check_remalloced_chunk(p, s);
-
- /*
- ... plus, must obey implementation invariant that prev_inuse is
- always true of any allocated chunk; i.e., that each allocated
- chunk borders either a previously allocated and still in-use
- chunk, or the base of its memory arena. This is ensured
- by making all allocations from the `lowest' part of any found
- chunk. This does not necessarily hold however for chunks
- recycled via fastbins.
- */
-
- assert(prev_inuse(p));
-}
-
-
-/*
- Properties of malloc_state.
-
- This may be useful for debugging malloc, as well as for detecting
- user programming errors that somehow write into malloc_state.
-
- If you are extending or experimenting with this malloc, you can
- probably figure out how to hack this routine to print out or
- display chunk addresses, sizes, bins, and other instrumentation.
-*/
-
-static void do_check_malloc_state()
-{
- mstate av = get_malloc_state();
- int i;
- mchunkptr p;
- mchunkptr q;
- mbinptr b;
- unsigned int binbit;
- int empty;
- unsigned int idx;
- INTERNAL_SIZE_T size;
- CHUNK_SIZE_T total = 0;
- int max_fast_bin;
-
- /* internal size_t must be no wider than pointer type */
- assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
-
- /* alignment is a power of 2 */
- assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
-
- /* cannot run remaining checks until fully initialized */
- if (av->top == 0 || av->top == initial_top(av))
- return;
-
- /* pagesize is a power of 2 */
- assert((av->pagesize & (av->pagesize-1)) == 0);
-
- /* properties of fastbins */
-
- /* max_fast is in allowed range */
- assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
-
- max_fast_bin = fastbin_index(av->max_fast);
-
- for (i = 0; i < NFASTBINS; ++i) {
- p = av->fastbins[i];
-
- /* all bins past max_fast are empty */
- if (i > max_fast_bin)
- assert(p == 0);
-
- while (p != 0) {
- /* each chunk claims to be inuse */
- do_check_inuse_chunk(p);
- total += chunksize(p);
- /* chunk belongs in this bin */
- assert(fastbin_index(chunksize(p)) == i);
- p = p->fd;
- }
- }
-
- if (total != 0)
- assert(have_fastchunks(av));
- else if (!have_fastchunks(av))
- assert(total == 0);
-
- /* check normal bins */
- for (i = 1; i < NBINS; ++i) {
- b = bin_at(av,i);
-
- /* binmap is accurate (except for bin 1 == unsorted_chunks) */
- if (i >= 2) {
- binbit = get_binmap(av,i);
- empty = last(b) == b;
- if (!binbit)
- assert(empty);
- else if (!empty)
- assert(binbit);
- }
-
- for (p = last(b); p != b; p = p->bk) {
- /* each chunk claims to be free */
- do_check_free_chunk(p);
- size = chunksize(p);
- total += size;
- if (i >= 2) {
- /* chunk belongs in bin */
- idx = bin_index(size);
- assert(idx == i);
- /* lists are sorted */
- if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
- assert(p->bk == b ||
- (CHUNK_SIZE_T)chunksize(p->bk) >=
- (CHUNK_SIZE_T)chunksize(p));
- }
- }
- /* chunk is followed by a legal chain of inuse chunks */
- for (q = next_chunk(p);
- (q != av->top && inuse(q) &&
- (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
- q = next_chunk(q))
- do_check_inuse_chunk(q);
- }
- }
-
- /* top chunk is OK */
- check_chunk(av->top);
-
- /* sanity checks for statistics */
-
- assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
- assert(av->n_mmaps >= 0);
- assert(av->n_mmaps <= av->max_n_mmaps);
-
- assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
- (CHUNK_SIZE_T)(av->max_sbrked_mem));
-
- assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
- (CHUNK_SIZE_T)(av->max_mmapped_mem));
-
- assert((CHUNK_SIZE_T)(av->max_total_mem) >=
- (CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem));
-}
-#endif
-
-
-/* ----------- Routines dealing with system allocation -------------- */
-
-/*
- sysmalloc handles malloc cases requiring more memory from the system.
- On entry, it is assumed that av->top does not have enough
- space to service request for nb bytes, thus requiring that av->top
- be extended or replaced.
-*/
-
-#if __STD_C
-static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
-#else
-static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
-#endif
-{
- mchunkptr old_top; /* incoming value of av->top */
- INTERNAL_SIZE_T old_size; /* its size */
- char* old_end; /* its end address */
-
- long size; /* arg to first MORECORE or mmap call */
- char* brk; /* return value from MORECORE */
-
- long correction; /* arg to 2nd MORECORE call */
- char* snd_brk; /* 2nd return val */
-
- INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
- INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
- char* aligned_brk; /* aligned offset into brk */
-
- mchunkptr p; /* the allocated/returned chunk */
- mchunkptr remainder; /* remainder from allocation */
- CHUNK_SIZE_T remainder_size; /* its size */
-
- CHUNK_SIZE_T sum; /* for updating stats */
-
- size_t pagemask = av->pagesize - 1;
-
- /*
- If there is space available in fastbins, consolidate and retry
- malloc from scratch rather than getting memory from system. This
- can occur only if nb is in smallbin range so we didn't consolidate
- upon entry to malloc. It is much easier to handle this case here
- than in malloc proper.
- */
-
- if (have_fastchunks(av)) {
- assert(in_smallbin_range(nb));
- malloc_consolidate(av);
- return mALLOc(nb - MALLOC_ALIGN_MASK);
- }
-
-
-#if HAVE_MMAP
-
- /*
- If have mmap, and the request size meets the mmap threshold, and
- the system supports mmap, and there are few enough currently
- allocated mmapped regions, try to directly map this request
- rather than expanding top.
- */
-
- if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
- (av->n_mmaps < av->n_mmaps_max)) {
-
- char* mm; /* return value from mmap call*/
-
- /*
- Round up size to nearest page. For mmapped chunks, the overhead
- is one SIZE_SZ unit larger than for normal chunks, because there
- is no following chunk whose prev_size field could be used.
- */
- size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
-
- /* Don't try if size wraps around 0 */
- if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
-
- mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
-
- if (mm != (char*)(MORECORE_FAILURE)) {
-
- /*
- The offset to the start of the mmapped region is stored
- in the prev_size field of the chunk. This allows us to adjust
- returned start address to meet alignment requirements here
- and in memalign(), and still be able to compute proper
- address argument for later munmap in free() and realloc().
- */
-
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0) {
- correction = MALLOC_ALIGNMENT - front_misalign;
- p = (mchunkptr)(mm + correction);
- p->prev_size = correction;
- set_head(p, (size - correction) |IS_MMAPPED);
- }
- else {
- p = (mchunkptr)mm;
- p->prev_size = 0;
- set_head(p, size|IS_MMAPPED);
- }
-
- /* update statistics */
-
- if (++av->n_mmaps > av->max_n_mmaps)
- av->max_n_mmaps = av->n_mmaps;
-
- sum = av->mmapped_mem += size;
- if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
- av->max_mmapped_mem = sum;
- sum += av->sbrked_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
- av->max_total_mem = sum;
-
- check_chunk(p);
-
- return chunk2mem(p);
- }
- }
- }
-#endif
-
- /* Record incoming configuration of top */
-
- old_top = av->top;
- old_size = chunksize(old_top);
- old_end = (char*)(chunk_at_offset(old_top, old_size));
-
- brk = snd_brk = (char*)(MORECORE_FAILURE);
-
- /*
- If not the first time through, we require old_size to be
- at least MINSIZE and to have prev_inuse set.
- */
-
- assert((old_top == initial_top(av) && old_size == 0) ||
- ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
- prev_inuse(old_top)));
-
- /* Precondition: not enough current space to satisfy nb request */
- assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
-
- /* Precondition: all fastbins are consolidated */
- assert(!have_fastchunks(av));
-
-
- /* Request enough space for nb + pad + overhead */
-
- size = nb + av->top_pad + MINSIZE;
-
- /*
- If contiguous, we can subtract out existing space that we hope to
- combine with new space. We add it back later only if
- we don't actually get contiguous space.
- */
-
- if (contiguous(av))
- size -= old_size;
-
- /*
- Round to a multiple of page size.
- If MORECORE is not contiguous, this ensures that we only call it
- with whole-page arguments. And if MORECORE is contiguous and
- this is not first time through, this preserves page-alignment of
- previous calls. Otherwise, we correct to page-align below.
- */
-
- size = (size + pagemask) & ~pagemask;
-
- /*
- Don't try to call MORECORE if argument is so big as to appear
- negative. Note that since mmap takes size_t arg, it may succeed
- below even if we cannot call MORECORE.
- */
-
- if (size > 0)
- brk = (char*)(MORECORE(size));
-
- /*
- If have mmap, try using it as a backup when MORECORE fails or
- cannot be used. This is worth doing on systems that have "holes" in
- address space, so sbrk cannot extend to give contiguous space, but
- space is available elsewhere. Note that we ignore mmap max count
- and threshold limits, since the space will not be used as a
- segregated mmap region.
- */
-
-#if HAVE_MMAP
- if (brk == (char*)(MORECORE_FAILURE)) {
-
- /* Cannot merge with old top, so add its size back in */
- if (contiguous(av))
- size = (size + old_size + pagemask) & ~pagemask;
-
- /* If we are relying on mmap as backup, then use larger units */
- if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
- size = MMAP_AS_MORECORE_SIZE;
-
- /* Don't try if size wraps around 0 */
- if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
-
- brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
-
- if (brk != (char*)(MORECORE_FAILURE)) {
-
- /* We do not need, and cannot use, another sbrk call to find end */
- snd_brk = brk + size;
-
- /*
- Record that we no longer have a contiguous sbrk region.
- After the first time mmap is used as backup, we do not
- ever rely on contiguous space since this could incorrectly
- bridge regions.
- */
- set_noncontiguous(av);
- }
- }
- }
-#endif
-
- if (brk != (char*)(MORECORE_FAILURE)) {
- av->sbrked_mem += size;
-
- /*
- If MORECORE extends previous space, we can likewise extend top size.
- */
-
- if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
- set_head(old_top, (size + old_size) | PREV_INUSE);
- }
-
- /*
- Otherwise, make adjustments:
-
- * If the first time through or noncontiguous, we need to call sbrk
- just to find out where the end of memory lies.
-
- * We need to ensure that all returned chunks from malloc will meet
- MALLOC_ALIGNMENT
-
- * If there was an intervening foreign sbrk, we need to adjust sbrk
- request size to account for fact that we will not be able to
- combine new space with existing space in old_top.
-
- * Almost all systems internally allocate whole pages at a time, in
- which case we might as well use the whole last page of request.
- So we allocate enough more memory to hit a page boundary now,
- which in turn causes future contiguous calls to page-align.
- */
-
- else {
- front_misalign = 0;
- end_misalign = 0;
- correction = 0;
- aligned_brk = brk;
-
- /*
- If MORECORE returns an address lower than we have seen before,
- we know it isn't really contiguous. This and some subsequent
- checks help cope with non-conforming MORECORE functions and
- the presence of "foreign" calls to MORECORE from outside of
- malloc or by other threads. We cannot guarantee to detect
- these in all cases, but cope with the ones we do detect.
- */
- if (contiguous(av) && old_size != 0 && brk < old_end) {
- set_noncontiguous(av);
- }
-
- /* handle contiguous cases */
- if (contiguous(av)) {
-
- /*
- We can tolerate forward non-contiguities here (usually due
- to foreign calls) but treat them as part of our space for
- stats reporting.
- */
- if (old_size != 0)
- av->sbrked_mem += brk - old_end;
-
- /* Guarantee alignment of first new chunk made from this space */
-
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0) {
-
- /*
- Skip over some bytes to arrive at an aligned position.
- We don't need to specially mark these wasted front bytes.
- They will never be accessed anyway because
- prev_inuse of av->top (and any chunk created from its start)
- is always true after initialization.
- */
-
- correction = MALLOC_ALIGNMENT - front_misalign;
- aligned_brk += correction;
- }
-
- /*
- If this isn't adjacent to existing space, then we will not
- be able to merge with old_top space, so must add to 2nd request.
- */
-
- correction += old_size;
-
- /* Extend the end address to hit a page boundary */
- end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
- correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
-
- assert(correction >= 0);
- snd_brk = (char*)(MORECORE(correction));
-
- if (snd_brk == (char*)(MORECORE_FAILURE)) {
- /*
- If can't allocate correction, try to at least find out current
- brk. It might be enough to proceed without failing.
- */
- correction = 0;
- snd_brk = (char*)(MORECORE(0));
- }
- else if (snd_brk < brk) {
- /*
- If the second call gives noncontiguous space even though
- it says it won't, the only course of action is to ignore
- results of second call, and conservatively estimate where
- the first call left us. Also set noncontiguous, so this
- won't happen again, leaving at most one hole.
-
- Note that this check is intrinsically incomplete. Because
- MORECORE is allowed to give more space than we ask for,
- there is no reliable way to detect a noncontiguity
- producing a forward gap for the second call.
- */
- snd_brk = brk + size;
- correction = 0;
- set_noncontiguous(av);
- }
-
- }
-
- /* handle non-contiguous cases */
- else {
- /* MORECORE/mmap must correctly align */
- assert(aligned_OK(chunk2mem(brk)));
-
- /* Find out current end of memory */
- if (snd_brk == (char*)(MORECORE_FAILURE)) {
- snd_brk = (char*)(MORECORE(0));
- av->sbrked_mem += snd_brk - brk - size;
- }
- }
-
- /* Adjust top based on results of second sbrk */
- if (snd_brk != (char*)(MORECORE_FAILURE)) {
- av->top = (mchunkptr)aligned_brk;
- set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
- av->sbrked_mem += correction;
-
- /*
- If not the first time through, we either have a
- gap due to foreign sbrk or a non-contiguous region. Insert a
- double fencepost at old_top to prevent consolidation with space
- we don't own. These fenceposts are artificial chunks that are
- marked as inuse and are in any case too small to use. We need
- two to make sizes and alignments work out.
- */
-
- if (old_size != 0) {
- /*
- Shrink old_top to insert fenceposts, keeping size a
- multiple of MALLOC_ALIGNMENT. We know there is at least
- enough space in old_top to do this.
- */
- old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
- set_head(old_top, old_size | PREV_INUSE);
-
- /*
- Note that the following assignments completely overwrite
- old_top when old_size was previously MINSIZE. This is
- intentional. We need the fencepost, even if old_top otherwise gets
- lost.
- */
- chunk_at_offset(old_top, old_size )->size =
- SIZE_SZ|PREV_INUSE;
-
- chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
- SIZE_SZ|PREV_INUSE;
-
- /*
- If possible, release the rest, suppressing trimming.
- */
- if (old_size >= MINSIZE) {
- INTERNAL_SIZE_T tt = av->trim_threshold;
- av->trim_threshold = (INTERNAL_SIZE_T)(-1);
- fREe(chunk2mem(old_top));
- av->trim_threshold = tt;
- }
- }
- }
- }
-
- /* Update statistics */
- sum = av->sbrked_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
- av->max_sbrked_mem = sum;
-
- sum += av->mmapped_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
- av->max_total_mem = sum;
-
- check_malloc_state();
-
- /* finally, do the allocation */
-
- p = av->top;
- size = chunksize(p);
-
- /* check that one of the above allocation paths succeeded */
- if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
- remainder_size = size - nb;
- remainder = chunk_at_offset(p, nb);
- av->top = remainder;
- set_head(p, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- check_malloced_chunk(p, nb);
- return chunk2mem(p);
- }
-
- }
-
- /* catch all failure paths */
- MALLOC_FAILURE_ACTION;
- return 0;
-}
-
-
-
-
-/*
- sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
- to the system (via negative arguments to sbrk) if there is unused
- memory at the `high' end of the malloc pool. It is called
- automatically by free() when top space exceeds the trim
- threshold. It is also called by the public malloc_trim routine. It
- returns 1 if it actually released any memory, else 0.
-*/
-
-#if __STD_C
-static int sYSTRIm(size_t pad, mstate av)
-#else
-static int sYSTRIm(pad, av) size_t pad; mstate av;
-#endif
-{
- long top_size; /* Amount of top-most memory */
- long extra; /* Amount to release */
- long released; /* Amount actually released */
- char* current_brk; /* address returned by pre-check sbrk call */
- char* new_brk; /* address returned by post-check sbrk call */
- size_t pagesz;
-
- pagesz = av->pagesize;
- top_size = chunksize(av->top);
-
- /* Release in pagesize units, keeping at least one page */
- extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
-
- if (extra > 0) {
-
- /*
- Only proceed if end of memory is where we last set it.
- This avoids problems if there were foreign sbrk calls.
- */
- current_brk = (char*)(MORECORE(0));
- if (current_brk == (char*)(av->top) + top_size) {
-
- /*
- Attempt to release memory. We ignore MORECORE return value,
- and instead call again to find out where new end of memory is.
-      This avoids problems if the first call releases less than we asked,
-      or if failure somehow altered the brk value. (We could still
- encounter problems if it altered brk in some very bad way,
- but the only thing we can do is adjust anyway, which will cause
- some downstream failure.)
- */
-
- MORECORE(-extra);
- new_brk = (char*)(MORECORE(0));
-
- if (new_brk != (char*)MORECORE_FAILURE) {
- released = (long)(current_brk - new_brk);
-
- if (released != 0) {
- /* Success. Adjust top. */
- av->sbrked_mem -= released;
- set_head(av->top, (top_size - released) | PREV_INUSE);
- check_malloc_state();
- return 1;
- }
- }
- }
- }
- return 0;
-}
-
-/*
- ------------------------------ malloc ------------------------------
-*/
-
-
-#if __STD_C
-Void_t* mALLOc(size_t bytes)
-#else
- Void_t* mALLOc(bytes) size_t bytes;
-#endif
-{
- mstate av = get_malloc_state();
-
- INTERNAL_SIZE_T nb; /* normalized request size */
- unsigned int idx; /* associated bin index */
- mbinptr bin; /* associated bin */
- mfastbinptr* fb; /* associated fastbin */
-
- mchunkptr victim; /* inspected/selected chunk */
- INTERNAL_SIZE_T size; /* its size */
- int victim_index; /* its bin index */
-
- mchunkptr remainder; /* remainder from a split */
- CHUNK_SIZE_T remainder_size; /* its size */
-
- unsigned int block; /* bit map traverser */
- unsigned int bit; /* bit map traverser */
- unsigned int map; /* current word of binmap */
-
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
-
- /*
- Convert request size to internal form by adding SIZE_SZ bytes
- overhead plus possibly more to obtain necessary alignment and/or
- to obtain a size of at least MINSIZE, the smallest allocatable
- size. Also, checked_request2size traps (returning 0) request sizes
- that are so large that they wrap around zero when padded and
- aligned.
- */
-
- checked_request2size(bytes, nb);
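-
-  /*
-    Worked example (an illustrative sketch assuming the typical 32-bit
-    values SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16, which
-    this build may override): a request of 13 bytes becomes 13 + 4 = 17,
-    which rounds up to the next 8-byte boundary, so nb == 24; a request
-    of 1 byte pads to only 5, which is below MINSIZE, so nb == 16.
-  */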
-
- /*
- Bypass search if no frees yet
- */
- if (!have_anychunks(av)) {
- if (av->max_fast == 0) /* initialization check */
- malloc_consolidate(av);
- goto use_top;
- }
-
- /*
- If the size qualifies as a fastbin, first check corresponding bin.
- */
-
- if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
- fb = &(av->fastbins[(fastbin_index(nb))]);
- if ( (victim = *fb) != 0) {
- *fb = victim->fd;
- check_remalloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
-
- /*
- If a small request, check regular bin. Since these "smallbins"
- hold one size each, no searching within bins is necessary.
- (For a large request, we need to wait until unsorted chunks are
- processed to find best fit. But for small ones, fits are exact
- anyway, so we can check now, which is faster.)
- */
-
- if (in_smallbin_range(nb)) {
- idx = smallbin_index(nb);
- bin = bin_at(av,idx);
-
- if ( (victim = last(bin)) != bin) {
- bck = victim->bk;
- set_inuse_bit_at_offset(victim, nb);
- bin->bk = bck;
- bck->fd = bin;
-
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
-
- /*
- If this is a large request, consolidate fastbins before continuing.
- While it might look excessive to kill all fastbins before
- even seeing if there is space available, this avoids
- fragmentation problems normally associated with fastbins.
- Also, in practice, programs tend to have runs of either small or
- large requests, but less often mixtures, so consolidation is not
-    invoked all that often in most programs. And the programs in which
-    it is called frequently otherwise tend to fragment.
- */
-
- else {
- idx = largebin_index(nb);
- if (have_fastchunks(av))
- malloc_consolidate(av);
- }
-
- /*
- Process recently freed or remaindered chunks, taking one only if
-    it is an exact fit, or, if this is a small request, the chunk is the remainder from
- the most recent non-exact fit. Place other traversed chunks in
- bins. Note that this step is the only place in any routine where
- chunks are placed in bins.
- */
-
- while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
- bck = victim->bk;
- size = chunksize(victim);
-
- /*
- If a small request, try to use last remainder if it is the
- only chunk in unsorted bin. This helps promote locality for
- runs of consecutive small requests. This is the only
- exception to best-fit, and applies only when there is
- no exact fit for a small chunk.
- */
-
- if (in_smallbin_range(nb) &&
- bck == unsorted_chunks(av) &&
- victim == av->last_remainder &&
- (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
-
- /* split and reattach remainder */
- remainder_size = size - nb;
- remainder = chunk_at_offset(victim, nb);
- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
- av->last_remainder = remainder;
- remainder->bk = remainder->fd = unsorted_chunks(av);
-
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
-
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /* remove from unsorted list */
- unsorted_chunks(av)->bk = bck;
- bck->fd = unsorted_chunks(av);
-
- /* Take now instead of binning if exact fit */
-
- if (size == nb) {
- set_inuse_bit_at_offset(victim, size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /* place chunk in bin */
-
- if (in_smallbin_range(size)) {
- victim_index = smallbin_index(size);
- bck = bin_at(av, victim_index);
- fwd = bck->fd;
- }
- else {
- victim_index = largebin_index(size);
- bck = bin_at(av, victim_index);
- fwd = bck->fd;
-
- if (fwd != bck) {
- /* if smaller than smallest, place first */
- if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
- fwd = bck;
- bck = bck->bk;
- }
- else if ((CHUNK_SIZE_T)(size) >=
- (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
-
- /* maintain large bins in sorted order */
- size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
- while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
- fwd = fwd->fd;
- bck = fwd->bk;
- }
- }
- }
-
- mark_bin(av, victim_index);
- victim->bk = bck;
- victim->fd = fwd;
- fwd->bk = victim;
- bck->fd = victim;
- }
-
- /*
- If a large request, scan through the chunks of current bin to
- find one that fits. (This will be the smallest that fits unless
- FIRST_SORTED_BIN_SIZE has been changed from default.) This is
- the only step where an unbounded number of chunks might be
- scanned without doing anything useful with them. However the
- lists tend to be short.
- */
-
- if (!in_smallbin_range(nb)) {
- bin = bin_at(av, idx);
-
- for (victim = last(bin); victim != bin; victim = victim->bk) {
- size = chunksize(victim);
-
- if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
- remainder_size = size - nb;
- unlink(victim, bck, fwd);
-
- /* Exhaust */
- if (remainder_size < MINSIZE) {
- set_inuse_bit_at_offset(victim, size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- /* Split */
- else {
- remainder = chunk_at_offset(victim, nb);
- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
- remainder->bk = remainder->fd = unsorted_chunks(av);
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
- }
- }
-
- /*
- Search for a chunk by scanning bins, starting with next largest
- bin. This search is strictly by best-fit; i.e., the smallest
- (with ties going to approximately the least recently used) chunk
- that fits is selected.
-
- The bitmap avoids needing to check that most blocks are nonempty.
- */
-
- ++idx;
- bin = bin_at(av,idx);
- block = idx2block(idx);
- map = av->binmap[block];
- bit = idx2bit(idx);
-
- for (;;) {
-
- /* Skip rest of block if there are no more set bits in this block. */
- if (bit > map || bit == 0) {
- do {
- if (++block >= BINMAPSIZE) /* out of bins */
- goto use_top;
- } while ( (map = av->binmap[block]) == 0);
-
- bin = bin_at(av, (block << BINMAPSHIFT));
- bit = 1;
- }
-
- /* Advance to bin with set bit. There must be one. */
- while ((bit & map) == 0) {
- bin = next_bin(bin);
- bit <<= 1;
- assert(bit != 0);
- }
-
- /* Inspect the bin. It is likely to be non-empty */
- victim = last(bin);
-
- /* If a false alarm (empty bin), clear the bit. */
- if (victim == bin) {
- av->binmap[block] = map &= ~bit; /* Write through */
- bin = next_bin(bin);
- bit <<= 1;
- }
-
- else {
- size = chunksize(victim);
-
- /* We know the first chunk in this bin is big enough to use. */
- assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
-
- remainder_size = size - nb;
-
- /* unlink */
- bck = victim->bk;
- bin->bk = bck;
- bck->fd = bin;
-
- /* Exhaust */
- if (remainder_size < MINSIZE) {
- set_inuse_bit_at_offset(victim, size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /* Split */
- else {
- remainder = chunk_at_offset(victim, nb);
-
- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
- remainder->bk = remainder->fd = unsorted_chunks(av);
- /* advertise as last remainder */
- if (in_smallbin_range(nb))
- av->last_remainder = remainder;
-
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
- }
- }
-
- use_top:
- /*
- If large enough, split off the chunk bordering the end of memory
- (held in av->top). Note that this is in accord with the best-fit
- search rule. In effect, av->top is treated as larger (and thus
- less well fitting) than any other available chunk since it can
- be extended to be as large as necessary (up to system
- limitations).
-
- We require that av->top always exists (i.e., has size >=
- MINSIZE) after initialization, so if it would otherwise be
-    exhausted by current request, it is replenished. (The main
- reason for ensuring it exists is that we may need MINSIZE space
- to put in fenceposts in sysmalloc.)
- */
-
- victim = av->top;
- size = chunksize(victim);
-
- if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
- remainder_size = size - nb;
- remainder = chunk_at_offset(victim, nb);
- av->top = remainder;
- set_head(victim, nb | PREV_INUSE);
- set_head(remainder, remainder_size | PREV_INUSE);
-
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- /*
- If no space in top, relay to handle system-dependent cases
- */
- return sYSMALLOc(nb, av);
-}
-
-/*
- ------------------------------ free ------------------------------
-*/
-
-#if __STD_C
-void fREe(Void_t* mem)
-#else
-void fREe(mem) Void_t* mem;
-#endif
-{
- mstate av = get_malloc_state();
-
- mchunkptr p; /* chunk corresponding to mem */
- INTERNAL_SIZE_T size; /* its size */
- mfastbinptr* fb; /* associated fastbin */
- mchunkptr nextchunk; /* next contiguous chunk */
- INTERNAL_SIZE_T nextsize; /* its size */
- int nextinuse; /* true if nextchunk is used */
- INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
-
- /* free(0) has no effect */
- if (mem != 0) {
- p = mem2chunk(mem);
- size = chunksize(p);
-
- check_inuse_chunk(p);
-
- /*
- If eligible, place chunk on a fastbin so it can be found
- and used quickly in malloc.
- */
-
- if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
-
-#if TRIM_FASTBINS
- /*
- If TRIM_FASTBINS set, don't place chunks
- bordering top into fastbins
- */
- && (chunk_at_offset(p, size) != av->top)
-#endif
- ) {
-
- set_fastchunks(av);
- fb = &(av->fastbins[fastbin_index(size)]);
- p->fd = *fb;
- *fb = p;
- }
-
- /*
- Consolidate other non-mmapped chunks as they arrive.
- */
-
- else if (!chunk_is_mmapped(p)) {
- set_anychunks(av);
-
- nextchunk = chunk_at_offset(p, size);
- nextsize = chunksize(nextchunk);
-
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = p->prev_size;
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
- }
-
- if (nextchunk != av->top) {
- /* get and clear inuse bit */
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- set_head(nextchunk, nextsize);
-
- /* consolidate forward */
- if (!nextinuse) {
- unlink(nextchunk, bck, fwd);
- size += nextsize;
- }
-
- /*
- Place the chunk in unsorted chunk list. Chunks are
- not placed into regular bins until after they have
- been given one chance to be used in malloc.
- */
-
- bck = unsorted_chunks(av);
- fwd = bck->fd;
- p->bk = bck;
- p->fd = fwd;
- bck->fd = p;
- fwd->bk = p;
-
- set_head(p, size | PREV_INUSE);
- set_foot(p, size);
-
- check_free_chunk(p);
- }
-
- /*
- If the chunk borders the current high end of memory,
- consolidate into top
- */
-
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- check_chunk(p);
- }
-
- /*
- If freeing a large space, consolidate possibly-surrounding
- chunks. Then, if the total unused topmost memory exceeds trim
- threshold, ask malloc_trim to reduce top.
-
- Unless max_fast is 0, we don't know if there are fastbins
- bordering top, so we cannot tell for sure whether threshold
- has been reached unless fastbins are consolidated. But we
- don't want to consolidate on each free. As a compromise,
- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
- is reached.
- */
-
- if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
- if (have_fastchunks(av))
- malloc_consolidate(av);
-
-#ifndef MORECORE_CANNOT_TRIM
- if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
- (CHUNK_SIZE_T)(av->trim_threshold))
- sYSTRIm(av->top_pad, av);
-#endif
- }
-
- }
- /*
- If the chunk was allocated via mmap, release via munmap()
- Note that if HAVE_MMAP is false but chunk_is_mmapped is
- true, then user must have overwritten memory. There's nothing
- we can do to catch this error unless DEBUG is set, in which case
-    check_inuse_chunk (above) will have triggered an error.
- */
-
- else {
-#if HAVE_MMAP
- int ret;
- INTERNAL_SIZE_T offset = p->prev_size;
- av->n_mmaps--;
- av->mmapped_mem -= (size + offset);
- ret = munmap((char*)p - offset, size + offset);
- /* munmap returns non-zero on failure */
- assert(ret == 0);
-#endif
- }
- }
-}
-
-/*
- ------------------------- malloc_consolidate -------------------------
-
- malloc_consolidate is a specialized version of free() that tears
- down chunks held in fastbins. Free itself cannot be used for this
- purpose since, among other things, it might place chunks back onto
- fastbins. So, instead, we need to use a minor variant of the same
- code.
-
- Also, because this routine needs to be called the first time through
- malloc anyway, it turns out to be the perfect place to trigger
- initialization code.
-*/
-
-#if __STD_C
-static void malloc_consolidate(mstate av)
-#else
-static void malloc_consolidate(av) mstate av;
-#endif
-{
- mfastbinptr* fb; /* current fastbin being consolidated */
- mfastbinptr* maxfb; /* last fastbin (for loop control) */
- mchunkptr p; /* current chunk being consolidated */
- mchunkptr nextp; /* next chunk to consolidate */
- mchunkptr unsorted_bin; /* bin header */
- mchunkptr first_unsorted; /* chunk to link to */
-
- /* These have same use as in free() */
- mchunkptr nextchunk;
- INTERNAL_SIZE_T size;
- INTERNAL_SIZE_T nextsize;
- INTERNAL_SIZE_T prevsize;
- int nextinuse;
- mchunkptr bck;
- mchunkptr fwd;
-
- /*
- If max_fast is 0, we know that av hasn't
- yet been initialized, in which case do so below
- */
-
- if (av->max_fast != 0) {
- clear_fastchunks(av);
-
- unsorted_bin = unsorted_chunks(av);
-
- /*
- Remove each chunk from fast bin and consolidate it, placing it
- then in unsorted bin. Among other reasons for doing this,
- placing in unsorted bin avoids needing to calculate actual bins
- until malloc is sure that chunks aren't immediately going to be
- reused anyway.
- */
-
- maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
- fb = &(av->fastbins[0]);
- do {
- if ( (p = *fb) != 0) {
- *fb = 0;
-
- do {
- check_inuse_chunk(p);
- nextp = p->fd;
-
- /* Slightly streamlined version of consolidation code in free() */
- size = p->size & ~PREV_INUSE;
- nextchunk = chunk_at_offset(p, size);
- nextsize = chunksize(nextchunk);
-
- if (!prev_inuse(p)) {
- prevsize = p->prev_size;
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
- }
-
- if (nextchunk != av->top) {
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- set_head(nextchunk, nextsize);
-
- if (!nextinuse) {
- size += nextsize;
- unlink(nextchunk, bck, fwd);
- }
-
- first_unsorted = unsorted_bin->fd;
- unsorted_bin->fd = p;
- first_unsorted->bk = p;
-
- set_head(p, size | PREV_INUSE);
- p->bk = unsorted_bin;
- p->fd = first_unsorted;
- set_foot(p, size);
- }
-
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- }
-
- } while ( (p = nextp) != 0);
-
- }
- } while (fb++ != maxfb);
- }
- else {
- malloc_init_state(av);
- check_malloc_state();
- }
-}
-
-/*
- ------------------------------ realloc ------------------------------
-*/
-
-
-#if __STD_C
-Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
-#else
-Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
-#endif
-{
- mstate av = get_malloc_state();
-
- INTERNAL_SIZE_T nb; /* padded request size */
-
- mchunkptr oldp; /* chunk corresponding to oldmem */
- INTERNAL_SIZE_T oldsize; /* its size */
-
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- Void_t* newmem; /* corresponding user mem */
-
- mchunkptr next; /* next contiguous chunk after oldp */
-
- mchunkptr remainder; /* extra space at end of newp */
- CHUNK_SIZE_T remainder_size; /* its size */
-
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
-
- CHUNK_SIZE_T copysize; /* bytes to copy */
- unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
- INTERNAL_SIZE_T* s; /* copy source */
- INTERNAL_SIZE_T* d; /* copy destination */
-
-
-#ifdef REALLOC_ZERO_BYTES_FREES
- if (bytes == 0) {
- fREe(oldmem);
- return 0;
- }
-#endif
-
- /* realloc of null is supposed to be same as malloc */
- if (oldmem == 0) return mALLOc(bytes);
-
- checked_request2size(bytes, nb);
-
- oldp = mem2chunk(oldmem);
- oldsize = chunksize(oldp);
-
- check_inuse_chunk(oldp);
-
- if (!chunk_is_mmapped(oldp)) {
-
- if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
- /* already big enough; split below */
- newp = oldp;
- newsize = oldsize;
- }
-
- else {
- next = chunk_at_offset(oldp, oldsize);
-
- /* Try to expand forward into top */
- if (next == av->top &&
- (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
- (CHUNK_SIZE_T)(nb + MINSIZE)) {
- set_head_size(oldp, nb);
- av->top = chunk_at_offset(oldp, nb);
- set_head(av->top, (newsize - nb) | PREV_INUSE);
- return chunk2mem(oldp);
- }
-
- /* Try to expand forward into next chunk; split off remainder below */
- else if (next != av->top &&
- !inuse(next) &&
- (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
- (CHUNK_SIZE_T)(nb)) {
- newp = oldp;
- unlink(next, bck, fwd);
- }
-
- /* allocate, copy, free */
- else {
- newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
- if (newmem == 0)
- return 0; /* propagate failure */
-
- newp = mem2chunk(newmem);
- newsize = chunksize(newp);
-
- /*
- Avoid copy if newp is next chunk after oldp.
- */
- if (newp == next) {
- newsize += oldsize;
- newp = oldp;
- }
- else {
- /*
- Unroll copy of <= 36 bytes (72 if 8byte sizes)
- We know that contents have an odd number of
- INTERNAL_SIZE_T-sized words; minimally 3.
- */
-
- copysize = oldsize - SIZE_SZ;
- s = (INTERNAL_SIZE_T*)(oldmem);
- d = (INTERNAL_SIZE_T*)(newmem);
- ncopies = copysize / sizeof(INTERNAL_SIZE_T);
- assert(ncopies >= 3);
-
- if (ncopies > 9)
- MALLOC_COPY(d, s, copysize);
-
- else {
- *(d+0) = *(s+0);
- *(d+1) = *(s+1);
- *(d+2) = *(s+2);
- if (ncopies > 4) {
- *(d+3) = *(s+3);
- *(d+4) = *(s+4);
- if (ncopies > 6) {
- *(d+5) = *(s+5);
- *(d+6) = *(s+6);
- if (ncopies > 8) {
- *(d+7) = *(s+7);
- *(d+8) = *(s+8);
- }
- }
- }
- }
-
- fREe(oldmem);
- check_inuse_chunk(newp);
- return chunk2mem(newp);
- }
- }
- }
-
- /* If possible, free extra space in old or extended chunk */
-
- assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
-
- remainder_size = newsize - nb;
-
- if (remainder_size < MINSIZE) { /* not enough extra to split off */
- set_head_size(newp, newsize);
- set_inuse_bit_at_offset(newp, newsize);
- }
- else { /* split remainder */
- remainder = chunk_at_offset(newp, nb);
- set_head_size(newp, nb);
- set_head(remainder, remainder_size | PREV_INUSE);
- /* Mark remainder as inuse so free() won't complain */
- set_inuse_bit_at_offset(remainder, remainder_size);
- fREe(chunk2mem(remainder));
- }
-
- check_inuse_chunk(newp);
- return chunk2mem(newp);
- }
-
- /*
- Handle mmap cases
- */
-
- else {
-#if HAVE_MMAP
-
-#if HAVE_MREMAP
- INTERNAL_SIZE_T offset = oldp->prev_size;
- size_t pagemask = av->pagesize - 1;
- char *cp;
- CHUNK_SIZE_T sum;
-
- /* Note the extra SIZE_SZ overhead */
- newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
-
- /* don't need to remap if still within same page */
- if (oldsize == newsize - offset)
- return oldmem;
-
- cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
-
- if (cp != (char*)MORECORE_FAILURE) {
-
- newp = (mchunkptr)(cp + offset);
- set_head(newp, (newsize - offset)|IS_MMAPPED);
-
- assert(aligned_OK(chunk2mem(newp)));
- assert((newp->prev_size == offset));
-
- /* update statistics */
- sum = av->mmapped_mem += newsize - oldsize;
- if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
- av->max_mmapped_mem = sum;
- sum += av->sbrked_mem;
- if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
- av->max_total_mem = sum;
-
- return chunk2mem(newp);
- }
-#endif
-
- /* Note the extra SIZE_SZ overhead. */
- if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
- newmem = oldmem; /* do nothing */
- else {
- /* Must alloc, copy, free. */
- newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
- if (newmem != 0) {
- MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
- fREe(oldmem);
- }
- }
- return newmem;
-
-#else
- /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
- check_malloc_state();
- MALLOC_FAILURE_ACTION;
- return 0;
-#endif
- }
-}
-
-/*
- ------------------------------ memalign ------------------------------
-*/
-
-#if __STD_C
-Void_t* mEMALIGn(size_t alignment, size_t bytes)
-#else
-Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
-#endif
-{
- INTERNAL_SIZE_T nb; /* padded request size */
- char* m; /* memory returned by malloc call */
- mchunkptr p; /* corresponding chunk */
- char* brk; /* alignment point within p */
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
- mchunkptr remainder; /* spare room at end to split off */
- CHUNK_SIZE_T remainder_size; /* its size */
- INTERNAL_SIZE_T size;
-
- /* If need less alignment than we give anyway, just relay to malloc */
-
- if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
-
- /* Otherwise, ensure that it is at least a minimum chunk size */
-
- if (alignment < MINSIZE) alignment = MINSIZE;
-
- /* Make sure alignment is power of 2 (in case MINSIZE is not). */
- if ((alignment & (alignment - 1)) != 0) {
- size_t a = MALLOC_ALIGNMENT * 2;
- while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
- alignment = a;
- }
-
- checked_request2size(bytes, nb);
-
- /*
- Strategy: find a spot within that chunk that meets the alignment
- request, and then possibly free the leading and trailing space.
- */
-
-
- /* Call malloc with worst case padding to hit alignment. */
-
- m = (char*)(mALLOc(nb + alignment + MINSIZE));
-
- if (m == 0) return 0; /* propagate failure */
-
- p = mem2chunk(m);
-
- if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
-
- /*
- Find an aligned spot inside chunk. Since we need to give back
- leading space in a chunk of at least MINSIZE, if the first
- calculation places us at a spot with less than MINSIZE leader,
- we can move to the next aligned spot -- we've allocated enough
- total room so that this is always possible.
- */
-
- brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
- -((signed long) alignment)));
- if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
- brk += alignment;
-
- newp = (mchunkptr)brk;
- leadsize = brk - (char*)(p);
- newsize = chunksize(p) - leadsize;
-
- /* For mmapped chunks, just adjust offset */
- if (chunk_is_mmapped(p)) {
- newp->prev_size = p->prev_size + leadsize;
- set_head(newp, newsize|IS_MMAPPED);
- return chunk2mem(newp);
- }
-
- /* Otherwise, give back leader, use the rest */
- set_head(newp, newsize | PREV_INUSE);
- set_inuse_bit_at_offset(newp, newsize);
- set_head_size(p, leadsize);
- fREe(chunk2mem(p));
- p = newp;
-
- assert (newsize >= nb &&
- (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
- }
-
- /* Also give back spare room at the end */
- if (!chunk_is_mmapped(p)) {
- size = chunksize(p);
- if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
- remainder_size = size - nb;
- remainder = chunk_at_offset(p, nb);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_head_size(p, nb);
- fREe(chunk2mem(remainder));
- }
- }
-
- check_inuse_chunk(p);
- return chunk2mem(p);
-}
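-
-/*
-  Usage sketch, assuming the usual public name memalign for mEMALIGn
-  (adjust to whatever names this build exports): request a block whose
-  address is a multiple of 64, e.g. to align with a cache line.  Note
-  that a non-power-of-two alignment is rounded up to a power of two
-  above.
-
-    void* buf = memalign (64, 1000);
-    if (buf != 0) {
-      // ... use buf ...
-      free (buf);
-    }
-*/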
-
-/*
- ------------------------------ calloc ------------------------------
-*/
-
-#if __STD_C
-Void_t* cALLOc(size_t n_elements, size_t elem_size)
-#else
-Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
-#endif
-{
- mchunkptr p;
- CHUNK_SIZE_T clearsize;
- CHUNK_SIZE_T nclears;
- INTERNAL_SIZE_T* d;
-
- Void_t* mem = mALLOc(n_elements * elem_size);
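-  /*
-    Note: the n_elements * elem_size product above is not checked for
-    overflow; checked_request2size inside mALLOc only traps sizes that
-    wrap once padded and aligned, not a product that has already
-    wrapped.  Callers multiplying untrusted counts and sizes should
-    verify the product themselves.
-  */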
-
- if (mem != 0) {
- p = mem2chunk(mem);
-
- if (!chunk_is_mmapped(p))
- {
- /*
- Unroll clear of <= 36 bytes (72 if 8byte sizes)
- We know that contents have an odd number of
- INTERNAL_SIZE_T-sized words; minimally 3.
- */
-
- d = (INTERNAL_SIZE_T*)mem;
- clearsize = chunksize(p) - SIZE_SZ;
- nclears = clearsize / sizeof(INTERNAL_SIZE_T);
- assert(nclears >= 3);
-
- if (nclears > 9)
- MALLOC_ZERO(d, clearsize);
-
- else {
- *(d+0) = 0;
- *(d+1) = 0;
- *(d+2) = 0;
- if (nclears > 4) {
- *(d+3) = 0;
- *(d+4) = 0;
- if (nclears > 6) {
- *(d+5) = 0;
- *(d+6) = 0;
- if (nclears > 8) {
- *(d+7) = 0;
- *(d+8) = 0;
- }
- }
- }
- }
- }
-#if ! MMAP_CLEARS
- else
- {
- d = (INTERNAL_SIZE_T*)mem;
- /*
- Note the additional SIZE_SZ
- */
- clearsize = chunksize(p) - 2*SIZE_SZ;
- MALLOC_ZERO(d, clearsize);
- }
-#endif
- }
- return mem;
-}
-
-/*
- ------------------------------ cfree ------------------------------
-*/
-
-#if __STD_C
-void cFREe(Void_t *mem)
-#else
-void cFREe(mem) Void_t *mem;
-#endif
-{
- fREe(mem);
-}
-
-/*
- ------------------------- independent_calloc -------------------------
-*/
-
-#if __STD_C
-Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
-#else
-Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
-#endif
-{
- size_t sz = elem_size; /* serves as 1-element array */
- /* opts arg of 3 means all elements are same size, and should be cleared */
- return iALLOc(n_elements, &sz, 3, chunks);
-}
-
-/*
- ------------------------- independent_comalloc -------------------------
-*/
-
-#if __STD_C
-Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
-#else
-Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
-#endif
-{
- return iALLOc(n_elements, sizes, 0, chunks);
-}
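-
-/*
-  Usage sketch, assuming the usual public name independent_comalloc for
-  iCOMALLOc (the element sizes here are arbitrary): carve three
-  separately usable blocks out of one underlying allocation.  Each
-  returned pointer behaves like an ordinary malloced block and may be
-  freed on its own.
-
-    size_t sizes[3] = { 16, 100, 4096 };
-    void*  parts[3];
-
-    if (independent_comalloc (3, sizes, parts) != 0) {
-      // ... use parts[0], parts[1], parts[2] ...
-      free (parts[1]);   // elements may be freed one at a time
-    }
-*/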
-
-
-/*
- ------------------------------ ialloc ------------------------------
- ialloc provides common support for independent_X routines, handling all of
- the combinations that can result.
-
- The opts arg has:
- bit 0 set if all elements are same size (using sizes[0])
- bit 1 set if elements should be zeroed
-*/
-
-
-#if __STD_C
-static Void_t** iALLOc(size_t n_elements,
- size_t* sizes,
- int opts,
- Void_t* chunks[])
-#else
-static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
-#endif
-{
- mstate av = get_malloc_state();
- INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
- INTERNAL_SIZE_T contents_size; /* total size of elements */
- INTERNAL_SIZE_T array_size; /* request size of pointer array */
- Void_t* mem; /* malloced aggregate space */
- mchunkptr p; /* corresponding chunk */
- INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
- Void_t** marray; /* either "chunks" or malloced ptr array */
- mchunkptr array_chunk; /* chunk for malloced ptr array */
- int mmx; /* to disable mmap */
- INTERNAL_SIZE_T size;
- size_t i;
-
- /* Ensure initialization */
- if (av->max_fast == 0) malloc_consolidate(av);
-
- /* compute array length, if needed */
- if (chunks != 0) {
- if (n_elements == 0)
- return chunks; /* nothing to do */
- marray = chunks;
- array_size = 0;
- }
- else {
- /* if empty req, must still return chunk representing empty array */
- if (n_elements == 0)
- return (Void_t**) mALLOc(0);
- marray = 0;
- array_size = request2size(n_elements * (sizeof(Void_t*)));
- }
-
- /* compute total element size */
- if (opts & 0x1) { /* all-same-size */
- element_size = request2size(*sizes);
- contents_size = n_elements * element_size;
- }
- else { /* add up all the sizes */
- element_size = 0;
- contents_size = 0;
- for (i = 0; i != n_elements; ++i)
- contents_size += request2size(sizes[i]);
- }
-
- /* subtract out alignment bytes from total to minimize overallocation */
- size = contents_size + array_size - MALLOC_ALIGN_MASK;
-
- /*
- Allocate the aggregate chunk.
- But first disable mmap so malloc won't use it, since
- we would not be able to later free/realloc space internal
- to a segregated mmap region.
- */
- mmx = av->n_mmaps_max; /* disable mmap */
- av->n_mmaps_max = 0;
- mem = mALLOc(size);
- av->n_mmaps_max = mmx; /* reset mmap */
- if (mem == 0)
- return 0;
-
- p = mem2chunk(mem);
- assert(!chunk_is_mmapped(p));
- remainder_size = chunksize(p);
-
- if (opts & 0x2) { /* optionally clear the elements */
- MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
- }
-
- /* If not provided, allocate the pointer array as final part of chunk */
- if (marray == 0) {
- array_chunk = chunk_at_offset(p, contents_size);
- marray = (Void_t**) (chunk2mem(array_chunk));
- set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
- remainder_size = contents_size;
- }
-
- /* split out elements */
- for (i = 0; ; ++i) {
- marray[i] = chunk2mem(p);
- if (i != n_elements-1) {
- if (element_size != 0)
- size = element_size;
- else
- size = request2size(sizes[i]);
- remainder_size -= size;
- set_head(p, size | PREV_INUSE);
- p = chunk_at_offset(p, size);
- }
- else { /* the final element absorbs any overallocation slop */
- set_head(p, remainder_size | PREV_INUSE);
- break;
- }
- }
-
-#if DEBUG
- if (marray != chunks) {
- /* final element must have exactly exhausted chunk */
- if (element_size != 0)
- assert(remainder_size == element_size);
- else
- assert(remainder_size == request2size(sizes[i]));
- check_inuse_chunk(mem2chunk(marray));
- }
-
- for (i = 0; i != n_elements; ++i)
- check_inuse_chunk(mem2chunk(marray[i]));
-#endif
-
- return marray;
-}
-
-
-/*
- ------------------------------ valloc ------------------------------
-*/
-
-#if __STD_C
-Void_t* vALLOc(size_t bytes)
-#else
-Void_t* vALLOc(bytes) size_t bytes;
-#endif
-{
- /* Ensure initialization */
- mstate av = get_malloc_state();
- if (av->max_fast == 0) malloc_consolidate(av);
- return mEMALIGn(av->pagesize, bytes);
-}
-
-/*
- ------------------------------ pvalloc ------------------------------
-*/
-
-
-#if __STD_C
-Void_t* pVALLOc(size_t bytes)
-#else
-Void_t* pVALLOc(bytes) size_t bytes;
-#endif
-{
- mstate av = get_malloc_state();
- size_t pagesz;
-
- /* Ensure initialization */
- if (av->max_fast == 0) malloc_consolidate(av);
- pagesz = av->pagesize;
- return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
-}
-
-
-/*
- ------------------------------ malloc_trim ------------------------------
-*/
-
-#if __STD_C
-int mTRIm(size_t pad)
-#else
-int mTRIm(pad) size_t pad;
-#endif
-{
- mstate av = get_malloc_state();
- /* Ensure initialization/consolidation */
- malloc_consolidate(av);
-
-#ifndef MORECORE_CANNOT_TRIM
- return sYSTRIm(pad, av);
-#else
- return 0;
-#endif
-}
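-
-/*
-  Usage sketch, assuming the usual public name malloc_trim for mTRIm
-  (referred to above as the public trimming entry point): after a large
-  working set has been freed, ask the allocator to return unused memory
-  at the top of the heap to the system, keeping some slack for future
-  requests.  The result is 1 if any memory was actually released.
-
-    // ... application frees many large blocks ...
-    int released = malloc_trim (64 * 1024);   // keep 64K of slack
-*/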
-
-
-/*
- ------------------------- malloc_usable_size -------------------------
-*/
-
-#if __STD_C
-size_t mUSABLe(Void_t* mem)
-#else
-size_t mUSABLe(mem) Void_t* mem;
-#endif
-{
- mchunkptr p;
- if (mem != 0) {
- p = mem2chunk(mem);
- if (chunk_is_mmapped(p))
- return chunksize(p) - 2*SIZE_SZ;
- else if (inuse(p))
- return chunksize(p) - SIZE_SZ;
- }
- return 0;
-}
-
-/*
- ------------------------------ mallinfo ------------------------------
-*/
-
-struct mallinfo mALLINFo()
-{
- mstate av = get_malloc_state();
- struct mallinfo mi;
- int i;
- mbinptr b;
- mchunkptr p;
- INTERNAL_SIZE_T avail;
- INTERNAL_SIZE_T fastavail;
- int nblocks;
- int nfastblocks;
-
- /* Ensure initialization */
- if (av->top == 0) malloc_consolidate(av);
-
- check_malloc_state();
-
- /* Account for top */
- avail = chunksize(av->top);
- nblocks = 1; /* top always exists */
-
- /* traverse fastbins */
- nfastblocks = 0;
- fastavail = 0;
-
- for (i = 0; i < NFASTBINS; ++i) {
- for (p = av->fastbins[i]; p != 0; p = p->fd) {
- ++nfastblocks;
- fastavail += chunksize(p);
- }
- }
-
- avail += fastavail;
-
- /* traverse regular bins */
- for (i = 1; i < NBINS; ++i) {
- b = bin_at(av, i);
- for (p = last(b); p != b; p = p->bk) {
- ++nblocks;
- avail += chunksize(p);
- }
- }
-
- mi.smblks = nfastblocks;
- mi.ordblks = nblocks;
- mi.fordblks = avail;
- mi.uordblks = av->sbrked_mem - avail;
- mi.arena = av->sbrked_mem;
- mi.hblks = av->n_mmaps;
- mi.hblkhd = av->mmapped_mem;
- mi.fsmblks = fastavail;
- mi.keepcost = chunksize(av->top);
- mi.usmblks = av->max_total_mem;
- return mi;
-}
-
-/*
- ------------------------------ malloc_stats ------------------------------
-*/
-
-void mSTATs()
-{
- struct mallinfo mi = mALLINFo();
-
-#ifdef WIN32
- {
- CHUNK_SIZE_T free, reserved, committed;
- vminfo (&free, &reserved, &committed);
- fprintf(stderr, "free bytes = %10lu\n",
- free);
- fprintf(stderr, "reserved bytes = %10lu\n",
- reserved);
- fprintf(stderr, "committed bytes = %10lu\n",
- committed);
- }
-#endif
-
-
- fprintf(stderr, "max system bytes = %10lu\n",
- (CHUNK_SIZE_T)(mi.usmblks));
- fprintf(stderr, "system bytes = %10lu\n",
- (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
- fprintf(stderr, "in use bytes = %10lu\n",
- (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
-
-#ifdef WIN32
- {
- CHUNK_SIZE_T kernel, user;
- if (cpuinfo (TRUE, &kernel, &user)) {
- fprintf(stderr, "kernel ms = %10lu\n",
- kernel);
- fprintf(stderr, "user ms = %10lu\n",
- user);
- }
- }
-#endif
-}
-
-
-/*
- ------------------------------ mallopt ------------------------------
-*/
-
-#if __STD_C
-int mALLOPt(int param_number, int value)
-#else
-int mALLOPt(param_number, value) int param_number; int value;
-#endif
-{
- mstate av = get_malloc_state();
- /* Ensure initialization/consolidation */
- malloc_consolidate(av);
-
- switch(param_number) {
- case M_MXFAST:
- if (value >= 0 && value <= MAX_FAST_SIZE) {
- set_max_fast(av, value);
- return 1;
- }
- else
- return 0;
-
- case M_TRIM_THRESHOLD:
- av->trim_threshold = value;
- return 1;
-
- case M_TOP_PAD:
- av->top_pad = value;
- return 1;
-
- case M_MMAP_THRESHOLD:
- av->mmap_threshold = value;
- return 1;
-
- case M_MMAP_MAX:
-#if !HAVE_MMAP
- if (value != 0)
- return 0;
-#endif
- av->n_mmaps_max = value;
- return 1;
-
- default:
- return 0;
- }
-}
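-
-/*
-  Usage sketch, assuming the usual public name mallopt for mALLOPt (the
-  values shown are only examples): bias the allocator toward fewer mmap
-  calls and less eager trimming, e.g. for a long-running server.  Each
-  call returns 1 on success and 0 if the parameter or value is not
-  accepted.
-
-    mallopt (M_MMAP_THRESHOLD, 1024 * 1024);
-    mallopt (M_TRIM_THRESHOLD,  512 * 1024);
-*/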
-
-
-/*
- -------------------- Alternative MORECORE functions --------------------
-*/
-
-
-/*
- General Requirements for MORECORE.
-
- The MORECORE function must have the following properties:
-
- If MORECORE_CONTIGUOUS is false:
-
- * MORECORE must allocate in multiples of pagesize. It will
- only be called with arguments that are multiples of pagesize.
-
- * MORECORE(0) must return an address that is at least
- MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
-
- else (i.e. If MORECORE_CONTIGUOUS is true):
-
- * Consecutive calls to MORECORE with positive arguments
- return increasing addresses, indicating that space has been
- contiguously extended.
-
- * MORECORE need not allocate in multiples of pagesize.
- Calls to MORECORE need not have args of multiples of pagesize.
-
- * MORECORE need not page-align.
-
- In either case:
-
- * MORECORE may allocate more memory than requested. (Or even less,
- but this will generally result in a malloc failure.)
-
- * MORECORE must not allocate memory when given argument zero, but
- instead return one past the end address of memory from previous
- nonzero call. This malloc does NOT call MORECORE(0)
- until at least one call with positive arguments is made, so
- the initial value returned is not important.
-
- * Even though consecutive calls to MORECORE need not return contiguous
- addresses, it must be OK for malloc'ed chunks to span multiple
- regions in those cases where they do happen to be contiguous.
-
- * MORECORE need not handle negative arguments -- it may instead
- just return MORECORE_FAILURE when given negative arguments.
- Negative arguments are always multiples of pagesize. MORECORE
- must not misinterpret negative args as large positive unsigned
-    args. You can prevent all such calls from even occurring by defining
-    MORECORE_CANNOT_TRIM.
-
- There is some variation across systems about the type of the
- argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
- actually be size_t, because sbrk supports negative args, so it is
- normally the signed type of the same width as size_t (sometimes
- declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
- matter though. Internally, we use "long" as arguments, which should
- work across all reasonable possibilities.
-
- Additionally, if MORECORE ever returns failure for a positive
- request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
- system allocator. This is a useful backup strategy for systems with
- holes in address spaces -- in this case sbrk cannot contiguously
- expand the heap, but mmap may be able to map noncontiguous space.
-
- If you'd like mmap to ALWAYS be used, you can define MORECORE to be
- a function that always returns MORECORE_FAILURE.
-
- Malloc only has limited ability to detect failures of MORECORE
- to supply contiguous space when it says it can. In particular,
-  multithreaded programs that do not use locks may suffer
-  race conditions across calls to MORECORE that result in gaps
-  that cannot be detected as such, and subsequent corruption.
-
- If you are using this malloc with something other than sbrk (or its
- emulation) to supply memory regions, you probably want to set
- MORECORE_CONTIGUOUS as false. As an example, here is a custom
- allocator kindly contributed for pre-OSX macOS. It uses virtually
- but not necessarily physically contiguous non-paged memory (locked
- in, present and won't get swapped out). You can use it by
- uncommenting this section, adding some #includes, and setting up the
- appropriate defines above:
-
- #define MORECORE osMoreCore
- #define MORECORE_CONTIGUOUS 0
-
- There is also a shutdown routine that should somehow be called for
- cleanup upon program exit.
-
- #define MAX_POOL_ENTRIES 100
- #define MINIMUM_MORECORE_SIZE (64 * 1024)
- static int next_os_pool;
- void *our_os_pools[MAX_POOL_ENTRIES];
-
- void *osMoreCore(int size)
- {
- void *ptr = 0;
- static void *sbrk_top = 0;
-
- if (size > 0)
- {
- if (size < MINIMUM_MORECORE_SIZE)
- size = MINIMUM_MORECORE_SIZE;
- if (CurrentExecutionLevel() == kTaskLevel)
- ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
- if (ptr == 0)
- {
- return (void *) MORECORE_FAILURE;
- }
- // save ptrs so they can be freed during cleanup
- our_os_pools[next_os_pool] = ptr;
- next_os_pool++;
- ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
- sbrk_top = (char *) ptr + size;
- return ptr;
- }
- else if (size < 0)
- {
- // we don't currently support shrink behavior
- return (void *) MORECORE_FAILURE;
- }
- else
- {
- return sbrk_top;
- }
- }
-
- // cleanup any allocated memory pools
- // called as last thing before shutting down driver
-
- void osCleanupMem(void)
- {
- void **ptr;
-
- for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
- if (*ptr)
- {
- PoolDeallocate(*ptr);
- *ptr = 0;
- }
- }
-
-*/
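-
-/*
-  A second sketch in the same spirit (the function name is illustrative,
-  not part of this file): as noted above, defining MORECORE as a
-  function that always fails forces this malloc to obtain all system
-  memory via mmap, provided HAVE_MMAP is nonzero.
-
-  static void *failMoreCore (int size)
-  {
-    return (void *) MORECORE_FAILURE;
-  }
-
-  #define MORECORE failMoreCore
-  #define MORECORE_CONTIGUOUS 0
-*/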
-
-
-/*
- --------------------------------------------------------------
-
- Emulation of sbrk for win32.
- Donated by J. Walter <Walter@GeNeSys-e.de>.
- For additional information about this code, and malloc on Win32, see
- http://www.genesys-e.de/jwalter/
-*/
-
-
-#ifdef WIN32
-
-#ifdef _DEBUG
-/* #define TRACE */
-#endif
-
-/* Support for USE_MALLOC_LOCK */
-#ifdef USE_MALLOC_LOCK
-
-/* Wait for spin lock */
-static int slwait (int *sl) {
- while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
- Sleep (0);
- return 0;
-}
-
-/* Release spin lock */
-static int slrelease (int *sl) {
- InterlockedExchange (sl, 0);
- return 0;
-}
-
-#ifdef NEEDED
-/* Spin lock for emulation code */
-static int g_sl;
-#endif
-
-#endif /* USE_MALLOC_LOCK */
-
-/* getpagesize for windows */
-static long getpagesize (void) {
- static long g_pagesize = 0;
- if (! g_pagesize) {
- SYSTEM_INFO system_info;
- GetSystemInfo (&system_info);
- g_pagesize = system_info.dwPageSize;
- }
- return g_pagesize;
-}
-static long getregionsize (void) {
- static long g_regionsize = 0;
- if (! g_regionsize) {
- SYSTEM_INFO system_info;
- GetSystemInfo (&system_info);
- g_regionsize = system_info.dwAllocationGranularity;
- }
- return g_regionsize;
-}
-
-/* A region list entry */
-typedef struct _region_list_entry {
- void *top_allocated;
- void *top_committed;
- void *top_reserved;
- long reserve_size;
- struct _region_list_entry *previous;
-} region_list_entry;
-
-/* Allocate and link a region entry in the region list */
-static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
- region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
- if (! next)
- return FALSE;
- next->top_allocated = (char *) base_reserved;
- next->top_committed = (char *) base_reserved;
- next->top_reserved = (char *) base_reserved + reserve_size;
- next->reserve_size = reserve_size;
- next->previous = *last;
- *last = next;
- return TRUE;
-}
-/* Free and unlink the last region entry from the region list */
-static int region_list_remove (region_list_entry **last) {
- region_list_entry *previous = (*last)->previous;
- if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last))
- return FALSE;
- *last = previous;
- return TRUE;
-}
-
-#define CEIL(size,to) (((size)+(to)-1)&~((to)-1))
-#define FLOOR(size,to) ((size)&~((to)-1))
-
-#define SBRK_SCALE 0
-/* #define SBRK_SCALE 1 */
-/* #define SBRK_SCALE 2 */
-/* #define SBRK_SCALE 4 */
-
-/* sbrk for windows */
-static void *sbrk (long size) {
- static long g_pagesize, g_my_pagesize;
- static long g_regionsize, g_my_regionsize;
- static region_list_entry *g_last;
- void *result = (void *) MORECORE_FAILURE;
-#ifdef TRACE
- printf ("sbrk %d\n", size);
-#endif
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Wait for spin lock */
- slwait (&g_sl);
-#endif
- /* First time initialization */
- if (! g_pagesize) {
- g_pagesize = getpagesize ();
- g_my_pagesize = g_pagesize << SBRK_SCALE;
- }
- if (! g_regionsize) {
- g_regionsize = getregionsize ();
- g_my_regionsize = g_regionsize << SBRK_SCALE;
- }
- if (! g_last) {
- if (! region_list_append (&g_last, 0, 0))
- goto sbrk_exit;
- }
- /* Assert invariants */
- assert (g_last);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
- g_last->top_allocated <= g_last->top_committed);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
- g_last->top_committed <= g_last->top_reserved &&
- (unsigned) g_last->top_committed % g_pagesize == 0);
- assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
- assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
- /* Allocation requested? */
- if (size >= 0) {
- /* Allocation size is the requested size */
- long allocate_size = size;
- /* Compute the size to commit */
- long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
- /* Do we reach the commit limit? */
- if (to_commit > 0) {
- /* Round size to commit */
- long commit_size = CEIL (to_commit, g_my_pagesize);
- /* Compute the size to reserve */
- long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
- /* Do we reach the reserve limit? */
- if (to_reserve > 0) {
- /* Compute the remaining size to commit in the current region */
- long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
- if (remaining_commit_size > 0) {
- /* Assert preconditions */
- assert ((unsigned) g_last->top_committed % g_pagesize == 0);
- assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
- /* Commit this */
- void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
- MEM_COMMIT, PAGE_READWRITE);
- /* Check returned pointer for consistency */
- if (base_committed != g_last->top_committed)
- goto sbrk_exit;
- /* Assert postconditions */
- assert ((unsigned) base_committed % g_pagesize == 0);
-#ifdef TRACE
- printf ("Commit %p %d\n", base_committed, remaining_commit_size);
-#endif
- /* Adjust the regions commit top */
- g_last->top_committed = (char *) base_committed + remaining_commit_size;
- }
- } {
- /* Now we are going to search and reserve. */
- int contiguous = -1;
- int found = FALSE;
- MEMORY_BASIC_INFORMATION memory_info;
- void *base_reserved;
- long reserve_size;
- do {
- /* Assume contiguous memory */
- contiguous = TRUE;
- /* Round size to reserve */
- reserve_size = CEIL (to_reserve, g_my_regionsize);
- /* Start with the current region's top */
- memory_info.BaseAddress = g_last->top_reserved;
- /* Assert preconditions */
- assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
- assert (0 < reserve_size && reserve_size % g_regionsize == 0);
- while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
- /* Assert postconditions */
- assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
-#ifdef TRACE
- printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
- memory_info.State == MEM_FREE ? "FREE":
- (memory_info.State == MEM_RESERVE ? "RESERVED":
- (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
-#endif
- /* Region is free, well aligned and big enough: we are done */
- if (memory_info.State == MEM_FREE &&
- (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
- memory_info.RegionSize >= (unsigned) reserve_size) {
- found = TRUE;
- break;
- }
- /* From now on we can't get contiguous memory! */
- contiguous = FALSE;
- /* Recompute size to reserve */
- reserve_size = CEIL (allocate_size, g_my_regionsize);
- memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
- /* Assert preconditions */
- assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
- assert (0 < reserve_size && reserve_size % g_regionsize == 0);
- }
- /* Search failed? */
- if (! found)
- goto sbrk_exit;
- /* Assert preconditions */
- assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
- assert (0 < reserve_size && reserve_size % g_regionsize == 0);
- /* Try to reserve this */
- base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
- MEM_RESERVE, PAGE_NOACCESS);
- if (! base_reserved) {
- int rc = GetLastError ();
- if (rc != ERROR_INVALID_ADDRESS)
- goto sbrk_exit;
- }
- /* A null pointer signals (hopefully) a race condition with another thread. */
- /* In this case, we try again. */
- } while (! base_reserved);
- /* Check returned pointer for consistency */
- if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
- goto sbrk_exit;
- /* Assert postconditions */
- assert ((unsigned) base_reserved % g_regionsize == 0);
-#ifdef TRACE
- printf ("Reserve %p %d\n", base_reserved, reserve_size);
-#endif
- /* Did we get contiguous memory? */
- if (contiguous) {
- long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
- /* Adjust allocation size */
- allocate_size -= start_size;
- /* Adjust the regions allocation top */
- g_last->top_allocated = g_last->top_committed;
- /* Recompute the size to commit */
- to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
- /* Round size to commit */
- commit_size = CEIL (to_commit, g_my_pagesize);
- }
- /* Append the new region to the list */
- if (! region_list_append (&g_last, base_reserved, reserve_size))
- goto sbrk_exit;
- /* Didn't we get contiguous memory? */
- if (! contiguous) {
- /* Recompute the size to commit */
- to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
- /* Round size to commit */
- commit_size = CEIL (to_commit, g_my_pagesize);
- }
- }
- }
- /* Assert preconditions */
- assert ((unsigned) g_last->top_committed % g_pagesize == 0);
- assert (0 < commit_size && commit_size % g_pagesize == 0); {
- /* Commit this */
- void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
- MEM_COMMIT, PAGE_READWRITE);
- /* Check returned pointer for consistency */
- if (base_committed != g_last->top_committed)
- goto sbrk_exit;
- /* Assert postconditions */
- assert ((unsigned) base_committed % g_pagesize == 0);
-#ifdef TRACE
- printf ("Commit %p %d\n", base_committed, commit_size);
-#endif
- /* Adjust the regions commit top */
- g_last->top_committed = (char *) base_committed + commit_size;
- }
- }
- /* Adjust the regions allocation top */
- g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
- result = (char *) g_last->top_allocated - size;
- /* Deallocation requested? */
- } else if (size < 0) {
- long deallocate_size = - size;
- /* As long as we have a region to release */
- while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
- /* Get the size to release */
- long release_size = g_last->reserve_size;
- /* Get the base address */
- void *base_reserved = (char *) g_last->top_reserved - release_size;
- /* Assert preconditions */
- assert ((unsigned) base_reserved % g_regionsize == 0);
- assert (0 < release_size && release_size % g_regionsize == 0); {
- /* Release this */
- int rc = VirtualFree (base_reserved, 0,
- MEM_RELEASE);
- /* Check returned code for consistency */
- if (! rc)
- goto sbrk_exit;
-#ifdef TRACE
- printf ("Release %p %d\n", base_reserved, release_size);
-#endif
- }
- /* Adjust deallocation size */
- deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
- /* Remove the old region from the list */
- if (! region_list_remove (&g_last))
- goto sbrk_exit;
- } {
- /* Compute the size to decommit */
- long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
- if (to_decommit >= g_my_pagesize) {
- /* Compute the size to decommit */
- long decommit_size = FLOOR (to_decommit, g_my_pagesize);
- /* Compute the base address */
- void *base_committed = (char *) g_last->top_committed - decommit_size;
- /* Assert preconditions */
- assert ((unsigned) base_committed % g_pagesize == 0);
- assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
- /* Decommit this */
- int rc = VirtualFree ((char *) base_committed, decommit_size,
- MEM_DECOMMIT);
- /* Check returned code for consistency */
- if (! rc)
- goto sbrk_exit;
-#ifdef TRACE
- printf ("Decommit %p %d\n", base_committed, decommit_size);
-#endif
- }
- /* Adjust deallocation size and regions commit and allocate top */
- deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
- g_last->top_committed = base_committed;
- g_last->top_allocated = base_committed;
- }
- }
- /* Adjust regions allocate top */
- g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
- /* Check for underflow */
- if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
- g_last->top_allocated > g_last->top_committed) {
- /* Adjust regions allocate top */
- g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
- goto sbrk_exit;
- }
- result = g_last->top_allocated;
- }
- /* Assert invariants */
- assert (g_last);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
- g_last->top_allocated <= g_last->top_committed);
- assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
- g_last->top_committed <= g_last->top_reserved &&
- (unsigned) g_last->top_committed % g_pagesize == 0);
- assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
- assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
-
-sbrk_exit:
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Release spin lock */
- slrelease (&g_sl);
-#endif
- return result;
-}
-
-/* mmap for windows */
-static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
- static long g_pagesize;
- static long g_regionsize;
-#ifdef TRACE
- printf ("mmap %d\n", size);
-#endif
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Wait for spin lock */
- slwait (&g_sl);
-#endif
- /* First time initialization */
- if (! g_pagesize)
- g_pagesize = getpagesize ();
- if (! g_regionsize)
- g_regionsize = getregionsize ();
- /* Assert preconditions */
- assert ((unsigned) ptr % g_regionsize == 0);
- assert (size % g_pagesize == 0);
- /* Allocate this */
- ptr = VirtualAlloc (ptr, size,
- MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
- if (! ptr) {
- ptr = (void *) MORECORE_FAILURE;
- goto mmap_exit;
- }
- /* Assert postconditions */
- assert ((unsigned) ptr % g_regionsize == 0);
-#ifdef TRACE
- printf ("Commit %p %d\n", ptr, size);
-#endif
-mmap_exit:
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Release spin lock */
- slrelease (&g_sl);
-#endif
- return ptr;
-}
-
-/* munmap for windows */
-static long munmap (void *ptr, long size) {
- static long g_pagesize;
- static long g_regionsize;
- int rc = MUNMAP_FAILURE;
-#ifdef TRACE
- printf ("munmap %p %d\n", ptr, size);
-#endif
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Wait for spin lock */
- slwait (&g_sl);
-#endif
- /* First time initialization */
- if (! g_pagesize)
- g_pagesize = getpagesize ();
- if (! g_regionsize)
- g_regionsize = getregionsize ();
- /* Assert preconditions */
- assert ((unsigned) ptr % g_regionsize == 0);
- assert (size % g_pagesize == 0);
- /* Free this */
- if (! VirtualFree (ptr, 0,
- MEM_RELEASE))
- goto munmap_exit;
- rc = 0;
-#ifdef TRACE
- printf ("Release %p %d\n", ptr, size);
-#endif
-munmap_exit:
-#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
- /* Release spin lock */
- slrelease (&g_sl);
-#endif
- return rc;
-}
-
-static void vminfo (CHUNK_SIZE_T *free, CHUNK_SIZE_T *reserved, CHUNK_SIZE_T *committed) {
- MEMORY_BASIC_INFORMATION memory_info;
- memory_info.BaseAddress = 0;
- *free = *reserved = *committed = 0;
- while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
- switch (memory_info.State) {
- case MEM_FREE:
- *free += memory_info.RegionSize;
- break;
- case MEM_RESERVE:
- *reserved += memory_info.RegionSize;
- break;
- case MEM_COMMIT:
- *committed += memory_info.RegionSize;
- break;
- }
- memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
- }
-}
-
-static int cpuinfo (int whole, CHUNK_SIZE_T *kernel, CHUNK_SIZE_T *user) {
- if (whole) {
- __int64 creation64, exit64, kernel64, user64;
- int rc = GetProcessTimes (GetCurrentProcess (),
- (FILETIME *) &creation64,
- (FILETIME *) &exit64,
- (FILETIME *) &kernel64,
- (FILETIME *) &user64);
- if (! rc) {
- *kernel = 0;
- *user = 0;
- return FALSE;
- }
- *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
- *user = (CHUNK_SIZE_T) (user64 / 10000);
- return TRUE;
- } else {
- __int64 creation64, exit64, kernel64, user64;
- int rc = GetThreadTimes (GetCurrentThread (),
- (FILETIME *) &creation64,
- (FILETIME *) &exit64,
- (FILETIME *) &kernel64,
- (FILETIME *) &user64);
- if (! rc) {
- *kernel = 0;
- *user = 0;
- return FALSE;
- }
- *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
- *user = (CHUNK_SIZE_T) (user64 / 10000);
- return TRUE;
- }
-}
-
-#endif /* WIN32 */
-
-/* ------------------------------------------------------------
-History:
- V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
- * Fix malloc_state bitmap array misdeclaration
-
- V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
- * Allow tuning of FIRST_SORTED_BIN_SIZE
- * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
- * Better detection and support for non-contiguousness of MORECORE.
- Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
- * Bypass most of malloc if no frees. Thanks To Emery Berger.
-  * Fix freeing of old top non-contiguous chunk in sysmalloc.
- * Raised default trim and map thresholds to 256K.
- * Fix mmap-related #defines. Thanks to Lubos Lunak.
- * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
- * Branch-free bin calculation
- * Default trim and mmap thresholds now 256K.
-
- V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
- * Introduce independent_comalloc and independent_calloc.
- Thanks to Michael Pachos for motivation and help.
- * Make optional .h file available
- * Allow > 2GB requests on 32bit systems.
- * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
- Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
- and Anonymous.
- * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
- helping test this.)
- * memalign: check alignment arg
- * realloc: don't try to shift chunks backwards, since this
- leads to more fragmentation in some programs and doesn't
- seem to help in any others.
- * Collect all cases in malloc requiring system memory into sYSMALLOc
- * Use mmap as backup to sbrk
- * Place all internal state in malloc_state
- * Introduce fastbins (although similar to 2.5.1)
- * Many minor tunings and cosmetic improvements
- * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
- * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
- Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
- * Include errno.h to support default failure action.
-
- V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
- * return null for negative arguments
- * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
- * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
- (e.g. WIN32 platforms)
- * Cleanup header file inclusion for WIN32 platforms
- * Cleanup code to avoid Microsoft Visual C++ compiler complaints
- * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
- memory allocation routines
- * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
- * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
- usage of 'assert' in non-WIN32 code
- * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
- avoid infinite loop
- * Always call 'fREe()' rather than 'free()'
-
- V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
- * Fixed ordering problem with boundary-stamping
-
- V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
- * Added pvalloc, as recommended by H.J. Liu
- * Added 64bit pointer support mainly from Wolfram Gloger
- * Added anonymously donated WIN32 sbrk emulation
- * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
- * malloc_extend_top: fix mask error that caused wastage after
- foreign sbrks
- * Add linux mremap support code from HJ Liu
-
- V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
- * Integrated most documentation with the code.
- * Add support for mmap, with help from
- Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
- * Use last_remainder in more cases.
- * Pack bins using idea from colin@nyx10.cs.du.edu
-  * Use ordered bins instead of best-fit threshold
- * Eliminate block-local decls to simplify tracing and debugging.
- * Support another case of realloc via move into top
-  * Fix error occurring when initial sbrk_base not word-aligned.
- * Rely on page size for units instead of SBRK_UNIT to
- avoid surprises about sbrk alignment conventions.
- * Add mallinfo, mallopt. Thanks to Raymond Nijssen
- (raymond@es.ele.tue.nl) for the suggestion.
- * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
- * More precautions for cases where other routines call sbrk,
- courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
- * Added macros etc., allowing use in linux libc from
- H.J. Lu (hjl@gnu.ai.mit.edu)
- * Inverted this history list
-
- V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
- * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
- * Removed all preallocation code since under current scheme
- the work required to undo bad preallocations exceeds
- the work saved in good cases for most test programs.
- * No longer use return list or unconsolidated bins since
- no scheme using them consistently outperforms those that don't
- given above changes.
- * Use best fit for very large chunks to prevent some worst-cases.
- * Added some support for debugging
-
- V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
- * Removed footers when chunks are in use. Thanks to
- Paul Wilson (wilson@cs.texas.edu) for the suggestion.
-
- V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
- * Added malloc_trim, with help from Wolfram Gloger
- (wmglo@Dent.MED.Uni-Muenchen.DE).
-
- V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
-
- V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
- * realloc: try to expand in both directions
- * malloc: swap order of clean-bin strategy;
- * realloc: only conditionally expand backwards
- * Try not to scavenge used bins
- * Use bin counts as a guide to preallocation
- * Occasionally bin return list chunks in first scan
- * Add a few optimizations from colin@nyx10.cs.du.edu
-
- V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
- * faster bin computation & slightly different binning
- * merged all consolidations to one part of malloc proper
- (eliminating old malloc_find_space & malloc_clean_bin)
- * Scan 2 returns chunks (not just 1)
- * Propagate failure in realloc if malloc returns 0
- * Add stuff to allow compilation on non-ANSI compilers
- from kpv@research.att.com
-
- V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
- * removed potential for odd address access in prev_chunk
- * removed dependency on getpagesize.h
- * misc cosmetics and a bit more internal documentation
- * anticosmetics: mangled names in macros to evade debugger strangeness
- * tested on sparc, hp-700, dec-mips, rs6000
- with gcc & native cc (hp, dec only) allowing
- Detlefs & Zorn comparison study (in SIGPLAN Notices.)
-
- Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
- * Based loosely on libg++-1.2X malloc. (It retains some of the overall
- structure of old version, but most details differ.)
-
-*/
diff --git a/task/mmap.c b/task/mmap.c
deleted file mode 100644
index cefa158..0000000
--- a/task/mmap.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* mmap.c - A simple mmap for anonymous memory allocations in task.
- Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
- Written by Neal H. Walfield <neal@gnu.org>.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with the GNU Hurd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <sys/mman.h>
-#include <errno.h>
-#include <hurd/anonymous.h>
-#include <hurd/vm.h>
-
-#include "output.h"
-
-void *
-mmap (void *address, size_t length, int protect, int flags,
- int filedes, off_t offset)
-{
- error_t err;
- uintptr_t a = (uintptr_t) address;
-
- if (address)
- panic ("mmap called with non-zero ADDRESS");
- if (flags != (MAP_PRIVATE | MAP_ANONYMOUS))
- panic ("mmap called with invalid flags");
- if (protect != (PROT_READ | PROT_WRITE))
- panic ("mmap called with invalid protection");
-
- err = hurd_anonymous_allocate (&a, length, HURD_ANONYMOUS_ZEROFILL, 0);
- if (err)
- {
- errno = err;
- return MAP_FAILED;
- }
-
- return (void *) a;
-}
-
-int
-munmap (void *addr, size_t length)
-{
- error_t err;
-
- /* POSIX says we must round LENGTH up to an even number of pages.
-     If ADDR is unaligned, that is an error (which hurd_vm_release
- will catch). */
- err = hurd_vm_release ((uintptr_t) addr, ((length + getpagesize () - 1)
- & ~(getpagesize () - 1)));
- if (err)
- {
- errno = err;
- return -1;
- }
- return 0;
-}
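Given the checks above, the only call pattern this mmap accepts is an
anonymous, private, read/write mapping at an address chosen by the
allocator.  A hedged usage sketch (the buffer size and message are
illustrative; panic comes from output.h):

  size_t len = 4 * getpagesize ();
  void *buf = mmap (0, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED)
    panic ("anonymous mmap of %i bytes failed", (int) len);
  /* ... use the zero-filled memory ... */
  munmap (buf, len);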
diff --git a/task/output.c b/task/output.c
deleted file mode 100644
index 8c139d3..0000000
--- a/task/output.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/* output.c - Output routines.
- Copyright (C) 2003 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdarg.h>
-
-#include <l4.h>
-
-#include "output.h"
-
-#include <hurd/wortel.h>
-
-
-/* True if debugging is enabled. */
-int output_debug;
-
-
-/* Send a shutdown request to the rootserver wortel. */
-void
-__attribute__((__noreturn__))
-shutdown (void)
-{
- wortel_shutdown ();
-
- while (1)
- l4_sleep (L4_NEVER);
-
- /* NOT REACHED. */
-}
-
-
-/* Print the single character CHR on the output device. */
-int
-putchar (int chr)
-{
- wortel_putchar (chr);
-
- return 0;
-}
-
-
-int
-puts (const char *str)
-{
- while (*str != '\0')
- putchar (*(str++));
-
- putchar ('\n');
-
- return 0;
-}
-
-
-static void
-print_nr (unsigned long long nr, int base)
-{
- static char *digits = "0123456789abcdef";
- char str[30];
- int i = 0;
-
- do
- {
- str[i++] = digits[nr % base];
- nr = nr / base;
- }
- while (nr);
-
- i--;
- while (i >= 0)
- putchar (str[i--]);
-}
-
-
-static void
-print_signed_nr (long long nr, int base)
-{
- unsigned long long unr;
-
- if (nr < 0)
- {
- putchar ('-');
- unr = -nr;
- }
- else
- unr = nr;
-
- print_nr (unr, base);
-}
-
-
-int
-printf (const char *fmt, ...)
-{
- va_list ap;
-
- va_start (ap, fmt);
- const char *p = fmt;
-
- while (*p != '\0')
- {
- if (*p != '%')
- {
- putchar (*(p++));
- continue;
- }
-
- p++;
- switch (*p)
- {
- case '%':
- putchar ('%');
- p++;
- break;
-
- case 'l':
- p++;
- if (*p != 'l')
- {
- putchar ('%');
- putchar ('l');
- putchar (*(p++));
- continue;
- }
- p++;
- switch (*p)
- {
- case 'o':
- print_nr (va_arg (ap, unsigned long long), 8);
- p++;
- break;
-
- case 'd':
- case 'i':
- print_signed_nr (va_arg (ap, long long), 10);
- p++;
- break;
-
- case 'x':
- case 'X':
- print_nr (va_arg (ap, unsigned long long), 16);
- p++;
- break;
-
- case 'u':
- print_nr (va_arg (ap, unsigned long long), 10);
- p++;
- break;
-
- default:
- putchar ('%');
- putchar ('l');
- putchar ('l');
- putchar (*(p++));
- break;
- }
- break;
-
- case 'o':
- print_nr (va_arg (ap, unsigned int), 8);
- p++;
- break;
-
- case 'd':
- case 'i':
- print_signed_nr (va_arg (ap, int), 10);
- p++;
- break;
-
- case 'x':
- case 'X':
- print_nr (va_arg (ap, unsigned int), 16);
- p++;
- break;
-
- case 'u':
- print_nr (va_arg (ap, unsigned int), 10);
- p++;
- break;
-
- case 'c':
- putchar (va_arg (ap, int));
- p++;
- break;
-
- case 's':
- {
- char *str = va_arg (ap, char *);
- while (*str)
- putchar (*(str++));
- }
- p++;
- break;
-
- case 'p':
- print_nr ((unsigned int) va_arg (ap, void *), 16);
- p++;
- break;
-
- default:
- putchar ('%');
- putchar (*p);
- p++;
- break;
- }
- }
-
- return 0;
-}
diff --git a/task/output.h b/task/output.h
deleted file mode 100644
index ab7dafc..0000000
--- a/task/output.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* output.h - Output routines interfaces.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#ifndef _OUTPUT_H
-#define _OUTPUT_H 1
-
-
-/* Print the single character CHR on the output device. */
-int putchar (int chr);
-
-int puts (const char *str);
-
-int printf (const char *fmt, ...);
-
-/* This is not an output function, but it is part of the panic()
- macro. */
-void __attribute__((__noreturn__)) shutdown (void);
-
-
-/* True if debug mode is enabled. */
-extern int output_debug;
-
-/* Print a debug message. */
-#define debug(fmt, ...) \
- ({ \
- extern char program_name[]; \
- if (output_debug) \
- printf ("%s:%s: " fmt, program_name, \
- __FUNCTION__, ##__VA_ARGS__); \
- })
-
-
-/* The program name. */
-extern char program_name[];
-
-/* Print an error message and fail. */
-#define panic(...) \
- ({ \
- printf ("%s: %s: error: ", program_name, __func__); \
- printf (__VA_ARGS__); \
- putchar ('\n'); \
- shutdown (); \
- })
-
-#endif /* _OUTPUT_H */
diff --git a/task/task-class.c b/task/task-class.c
deleted file mode 100644
index a80f241..0000000
--- a/task/task-class.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/* task-class.c - Task class for the task server.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdlib.h>
-
-#include <l4.h>
-#include <hurd/cap-server.h>
-#include <hurd/wortel.h>
-
-#include "task.h"
-
-
-static void
-task_reinit (hurd_cap_class_t cap_class, hurd_cap_obj_t obj)
-{
- task_t task = hurd_cap_obj_to_user (task_t, obj);
- thread_t thread;
-
- thread = task->threads;
-
- /* Destroy all threads. */
- while (thread)
- {
- /* FIXME: We are ignoring an error here. */
- wortel_thread_control (thread->thread_id, l4_nilthread, l4_nilthread,
- l4_nilthread, (void *) -1);
- thread_dealloc (thread);
- thread = thread->next;
- }
-
- /* FIXME: Return the task ID to the list of free task IDs for future
- allocation. */
-}
-
-
-error_t
-task_thread_alloc (hurd_cap_rpc_context_t ctx)
-{
- task_t task = hurd_cap_obj_to_user (task_t, ctx->obj);
- error_t err;
- thread_t thread;
- void *utcb;
- l4_word_t result;
-
- /* Does not need to be checked. */
- utcb = (void *) l4_msg_word (ctx->msg, 1);
-
- err = thread_alloc (&thread);
- if (err)
- return err;
-
- thread->thread_id = l4_global_id (l4_thread_no (thread->thread_id),
- task->task_id);
-
- /* Put the thread into the task as an active thread. FIXME:
- Scheduler. */
- result = wortel_thread_control (thread->thread_id, task->threads->thread_id,
- l4_myself (), thread->thread_id, utcb);
- if (result)
- {
- /* FIXME: Convert error codes in wortel.h. */
- thread_dealloc (thread);
- return EINVAL;
- }
-
- thread->next = task->threads;
- task->threads = thread;
- task->nr_threads++;
-
- /* Prepare reply message. */
- l4_msg_clear (ctx->msg);
- l4_msg_append_word (ctx->msg, thread->thread_id);
-
- return 0;
-}
-
-
-error_t
-task_demuxer (hurd_cap_rpc_context_t ctx)
-{
- error_t err = 0;
-
- switch (l4_msg_label (ctx->msg))
- {
- /* TASK_THREAD_ALLOC */
- case 512:
- err = task_thread_alloc (ctx);
- break;
-
- default:
- err = EOPNOTSUPP;
- }
-
- return err;
-}
-
-
-
-static struct hurd_cap_class task_class;
-
-/* Initialize the task class subsystem. */
-error_t
-task_class_init ()
-{
- return hurd_cap_class_init (&task_class, task_t,
- NULL, NULL, task_reinit, NULL,
- task_demuxer);
-}
-
-
-/* Allocate a new task object with the task ID TASK_ID and the
- NR_THREADS threads listed in THREADS (which are already allocated
-   for that task).  The object returned is locked and has one
- reference. */
-error_t
-task_alloc (l4_word_t task_id, unsigned int nr_threads,
- l4_thread_id_t *threads, task_t *r_task)
-{
- error_t err;
- hurd_cap_obj_t obj;
- task_t task;
-
- err = hurd_cap_class_alloc (&task_class, &obj);
- if (err)
- return err;
- task = hurd_cap_obj_to_user (task_t, obj);
-
- task->task_id = task_id;
-
-  /* Add the threads from back to front.  Record NR_THREADS now,
-     since the loop below consumes the counter.  */
-  task->threads = NULL;
-  task->nr_threads = nr_threads;
-
- while (nr_threads--)
- {
- thread_t thread;
-
- err = thread_alloc_with_id (threads[nr_threads], &thread);
- if (err)
- {
- /* Roll back the thread creation manually to defeat the
- automatic deallocation routines, which will actually go
- and kill those wortel-provided threads. */
- thread = task->threads;
- while (thread)
- {
- thread->thread_id = l4_nilthread;
- thread_dealloc (thread);
- thread = thread->next;
- }
-
- task->threads = NULL;
- task->nr_threads = 0;
- hurd_cap_obj_drop (obj);
-
- return err;
- }
-
- thread->next = task->threads;
- task->threads = thread;
- }
-
- *r_task = task;
- return 0;
-}
diff --git a/task/task-id.c b/task/task-id.c
deleted file mode 100644
index ec51d05..0000000
--- a/task/task-id.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/* task-id.c - Manage task IDs.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stddef.h>
-
-#include "task.h"
-
-
-/* The hash table mapping task IDs to tasks. */
-struct hurd_ihash task_id_to_task
- = HURD_IHASH_INITIALIZER (offsetof (struct task, locp));
-
-/* The lock protecting the task_id_to_task hash table and associated
- data. */
-pthread_mutex_t task_id_to_task_lock = PTHREAD_MUTEX_INITIALIZER;
-
-#define task_id_is_free(task_id) \
- (hurd_ihash_find (&task_id_to_task, (task_id)) == NULL)
-
-
-/* Enter the task TASK under its ID into the hash table, consuming one
- reference. Mainly used by the bootstrap functions. */
-error_t
-task_id_enter (task_t task)
-{
- error_t err;
-
- pthread_mutex_lock (&task_id_to_task_lock);
- err = hurd_ihash_add (&task_id_to_task, task->task_id, task);
- pthread_mutex_unlock (&task_id_to_task_lock);
-
- return err;
-}
-
-
-/* Increment the task_id_next marker. */
-static inline hurd_task_id_t
-task_id_inc (hurd_task_id_t task_id)
-{
- /* We know that either the next task ID or the one after it is
- valid. So we manually unroll the loop here. */
-
- task_id++;
- if (! L4_THREAD_VERSION_VALID (task_id))
- task_id++;
-
- return task_id;
-}
-
-
-/* Find a free task ID, enter the task TASK into the hash table under
- this ID, consuming one reference, and return the new task ID. If
- no free task ID is available, EAGAIN is returned. */
-error_t
-task_id_add (task_t task, hurd_task_id_t *task_id_p)
-{
- /* Zero is an invalid task ID. But last_task_id will be incremented
- to the next valid task ID before the first allocation takes
- place. This variable is protected by task_id_to_task_lock. */
- static hurd_task_id_t last_task_id;
- error_t err = 0;
- hurd_task_id_t task_id;
-
- pthread_mutex_lock (&task_id_to_task_lock);
-
- /* Find next valid task ID. */
- task_id = task_id_inc (last_task_id);
-
- if (__builtin_expect (! task_id_is_free (task_id), 0))
- {
- /* Slow path. The next task ID is taken. Skip forward until we
- find a free one. */
-
- /* The first task ID we tried. */
- hurd_task_id_t first_task_id = task_id;
-
- do
- task_id = task_id_inc (task_id);
- while (task_id != first_task_id && !task_id_is_free (task_id));
-
- /* Check if we wrapped over and ended up where we started. */
- if (task_id == first_task_id)
- err = EAGAIN;
- }
-
- if (__builtin_expect (!err, 1))
- {
- err = hurd_ihash_add (&task_id_to_task, task_id, task);
- if (__builtin_expect (!err, 1))
- {
- task->task_id = task_id;
- *task_id_p = task_id;
- last_task_id = task_id;
- }
- }
-
- pthread_mutex_unlock (&task_id_to_task_lock);
-
- return err;
-}
diff --git a/task/task.c b/task/task.c
deleted file mode 100644
index 0e6abc7..0000000
--- a/task/task.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/* Main function for the task server.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <stdlib.h>
-#include <pthread.h>
-
-#include <hurd/startup.h>
-#include <hurd/wortel.h>
-
-#include "task.h"
-
-
-/* Initialized by the machine-specific startup-code. */
-extern struct hurd_startup_data *__hurd_startup_data;
-
-
-/* The program name. */
-char program_name[] = "task";
-
-
-/* The following functions are required by pthread. */
-
-void
-__attribute__ ((__noreturn__))
-exit (int __status)
-{
- panic ("exit() called");
-}
-
-
-void
-abort (void)
-{
- panic ("abort() called");
-}
-
-
-/* FIXME: Should be elsewhere. Needed by libhurd-slab. */
-int
-getpagesize()
-{
- return l4_min_page_size ();
-}
-
-
-void
-create_bootstrap_caps (hurd_cap_bucket_t bucket)
-{
- error_t err;
- hurd_cap_handle_t cap;
- hurd_cap_obj_t obj;
- task_t task;
-
- l4_accept (L4_UNTYPED_WORDS_ACCEPTOR);
-
- while (1)
- {
- hurd_task_id_t task_id;
- unsigned int nr_threads;
- l4_thread_id_t threads[L4_NUM_MRS];
-
- task_id = wortel_get_task_cap_request (&nr_threads, threads);
-
- if (nr_threads == 0)
- {
- /* This requests the master control capability. */
-
- /* FIXME: Create capability. */
- /* FIXME: Use our control cap for this task here. */
- wortel_get_task_cap_reply (0xf00);
-
- /* This is the last request made. */
- return;
- }
- else
- {
- debug ("Creating task cap for 0x%x:", task_id);
-
- err = task_alloc (task_id, nr_threads, threads, &task);
- if (err)
- panic ("task_alloc: %i", err);
-
- obj = hurd_cap_obj_from_user (task_t, task);
- hurd_cap_obj_unlock (obj);
-
- err = task_id_enter (task);
- if (err)
- panic ("task_id_enter: %i", err);
-
- err = hurd_cap_bucket_inject (bucket, obj, task_id, &cap);
- if (err)
- panic ("hurd_cap_bucket_inject: %i", err);
-
- hurd_cap_obj_lock (obj);
- hurd_cap_obj_drop (obj);
-
- debug (" 0x%x\n", cap);
-
- /* Return CAP. */
- wortel_get_task_cap_reply (cap);
- }
- }
-}
-
-
-/* Get our task ID. */
-static l4_word_t
-get_task_id ()
-{
- return l4_version (l4_my_global_id ());
-}
-
-
-/* Initialize the thread support, and return the L4 thread ID to be
- used for the server thread. */
-static l4_thread_id_t
-setup_threads (void)
-{
- l4_word_t err;
- l4_word_t first_free_thread_no;
- pthread_t thread;
- l4_thread_id_t server_thread;
- l4_thread_id_t main_thread;
- l4_thread_id_t extra_thread;
- l4_thread_id_t pager;
-
- first_free_thread_no = wortel_get_first_free_thread_no ();
-
- /* Use the first free thread as main thread. */
- main_thread = l4_global_id (first_free_thread_no, get_task_id ());
- server_thread = l4_my_global_id ();
-
- /* Create the main thread as an active thread. The scheduler is
- us. */
- err = wortel_thread_control (main_thread, l4_myself (), l4_myself (),
- main_thread,
- (void *)
- (l4_address (__hurd_startup_data->utcb_area)
- + l4_utcb_size ()));
- if (err)
- panic ("could not create main task thread: %s", l4_strerror (err));
-
- /* Switch threads. We still need the current main thread as the
- server thread. */
- pager = l4_pager ();
- switch_thread (server_thread, main_thread);
- l4_set_pager (pager);
-
- /* Create the main thread. */
- err = pthread_create (&thread, 0, 0, 0);
-
- if (err)
- panic ("could not create main thread: %i", err);
-
-  /* FIXME: This is unnecessary as soon as we implement this properly
-     in pthread (of course, within the task server, we will use an
-     override to not actually make an RPC to ourselves).  */
-
- /* Now add the remaining extra threads to the pool. */
- extra_thread = l4_global_id (first_free_thread_no + 1, get_task_id ());
- err = wortel_thread_control (extra_thread, l4_myself (), l4_myself (),
- extra_thread,
- (void *)
- (l4_address (__hurd_startup_data->utcb_area)
- + 2 * l4_utcb_size ()));
- pthread_pool_add_np (extra_thread);
-
- extra_thread = l4_global_id (first_free_thread_no + 2, get_task_id ());
- err = wortel_thread_control (extra_thread, l4_myself (), l4_myself (),
- extra_thread,
- (void *)
- (l4_address (__hurd_startup_data->utcb_area)
- + 3 * l4_utcb_size ()));
- pthread_pool_add_np (extra_thread);
-
- /* FIXME: Look up the real limits on the KIP, or get them from wortel. */
- thread_set_range (l4_global_id (first_free_thread_no + 3, 1),
- l4_global_id (first_free_thread_no & 0xffff, 1));
-
- return server_thread;
-}
-
-
-void *
-task_server (void *arg)
-{
- hurd_cap_bucket_t bucket = (hurd_cap_bucket_t) arg;
- error_t err;
-
- /* No root object is provided by the task server. */
- /* FIXME: Use a worker timeout. */
- /* FIXME: Use a no-sender callback that deletes the resources from a
- dead task and turns it into a zombie or removes it from the hash
- table completely. */
- err = hurd_cap_bucket_manage_mt (bucket, NULL, 0, 0);
- if (err)
- debug ("bucket_manage_mt failed: %i\n", err);
-
- panic ("bucket_manage_mt returned!");
-}
-
-
-static void
-bootstrap_final (void)
-{
- l4_thread_id_t task_server;
- hurd_cap_handle_t task_cap;
- l4_thread_id_t deva_server;
- hurd_cap_handle_t deva_cap;
-
- wortel_bootstrap_final (&task_server, &task_cap, &deva_server, &deva_cap);
-
- /* FIXME: Do something with the task cap. */
-}
-
-
-int
-main (int argc, char *argv[])
-{
- error_t err;
- l4_thread_id_t server_thread;
- hurd_cap_bucket_t bucket;
- pthread_t manager;
-
- output_debug = 1;
-
- debug ("%s " PACKAGE_VERSION "\n", program_name);
-
- server_thread = setup_threads ();
-
- /* FIXME: Start the scheduler. */
-
- err = task_class_init ();
- if (err)
- panic ("task_class_init: %i", err);
-
- err = hurd_cap_bucket_create (&bucket);
- if (err)
- panic ("bucket_create: %i", err);
-
- create_bootstrap_caps (bucket);
-
- /* Create the server thread and start serving RPC requests. */
- err = pthread_create_from_l4_tid_np (&manager, NULL, server_thread,
- task_server, bucket);
-
- if (err)
- panic ("pthread_create_from_l4_tid_np: %i", err);
- pthread_detach (manager);
-
- bootstrap_final ();
-
-  /* FIXME: Eventually, add shutdown support on wortel's (?)
- request. */
- while (1)
- l4_sleep (L4_NEVER);
-
- return 0;
-}
diff --git a/task/task.h b/task/task.h
deleted file mode 100644
index f9f83fb..0000000
--- a/task/task.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* task.h - Generic definitions.
- Copyright (C) 2004 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#ifndef TASK_H
-#define TASK_H 1
-
-#include <errno.h>
-
-#include <l4.h>
-#include <hurd/cap-server.h>
-#include <hurd/ihash.h>
-
-#include "output.h"
-
-
-/* The program name. */
-extern char program_name[];
-
-#define BUG_ADDRESS "<bug-hurd@gnu.org>"
-
-int main (int argc, char *argv[]);
-
-
-/* The following function must be defined by the architecture
- dependent code. */
-
-/* Switch execution transparently to thread TO. The thread FROM,
- which must be the current thread, will be halted. */
-void switch_thread (l4_thread_id_t from, l4_thread_id_t to);
-
-
-/* Thread objects. These are not capabilities, but components of task
- objects. */
-struct thread
-{
- /* The next pointer in a linked list of threads. */
- struct thread *next;
-
- /* The thread ID of the thread. The version part is the task_id the
- thread is assigned to, or undefined if the thread is unassigned.
- The thread number is determined at construction and fixed. */
- l4_thread_id_t thread_id;
-
- /* FIXME: More members like priority, CPU usage etc. */
-};
-typedef struct thread *thread_t;
-
-
-/* Set the range of thread IDs that we are allowed to allocate. */
-void thread_set_range (l4_thread_id_t first, l4_thread_id_t last);
-
-/* Allocate a new thread object with the thread ID THREAD_ID and
- return it in THREAD. Only used at bootstrap. */
-error_t thread_alloc_with_id (l4_thread_id_t thread_id, thread_t *thread);
-
-/* Allocate a new thread object and return it in THREAD. */
-error_t thread_alloc (thread_t *thread);
-
-/* Deallocate the thread THREAD. */
-void thread_dealloc (thread_t thread);
-
-
-/* Task objects. */
-
-struct task
-{
- /* This is for fast removal from the task_id_to_task hash table. */
- hurd_ihash_locp_t locp;
-
- /* The task ID is used in the version field of the global thread ID,
- so it is limited to L4_THREAD_VERSION_BITS (14/32) bits and must
- not have its lower 6 bits set to all zero (because that indicates
- a local thread ID). */
- hurd_task_id_t task_id;
-
- /* The threads in this task. */
- unsigned int nr_threads;
- thread_t threads;
-};
-typedef struct task *task_t;
-
-
-/* Initialize the task class subsystem. */
-error_t task_class_init ();
-
-/* Allocate a new task object with the task ID TASK_ID and the
- NR_THREADS threads listed in THREADS (which are already allocated
-   for that task).  The object returned is locked and has one
- reference. */
-error_t task_alloc (l4_word_t task_id, unsigned int nr_threads,
- l4_thread_id_t *threads, task_t *r_task);
-
-
-extern pthread_mutex_t task_id_to_task_lock;
-
-/* The hash table mapping task IDs to tasks. */
-extern struct hurd_ihash task_id_to_task;
-
-/* Acquire a reference for the task with the task ID TASK_ID and
- return the task object. If the task ID is not valid, return
- NULL. */
-static inline task_t
-task_id_get_task (hurd_task_id_t task_id)
-{
- task_t task;
-
- pthread_mutex_lock (&task_id_to_task_lock);
- task = hurd_ihash_find (&task_id_to_task, task_id);
- if (task)
- {
- hurd_cap_obj_t obj = hurd_cap_obj_from_user (task_t, task);
- hurd_cap_obj_ref (obj);
- }
- pthread_mutex_unlock (&task_id_to_task_lock);
-
- return task;
-}
-
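For illustration, a hedged sketch of the intended lookup pattern:
task_id_get_task acquires a reference that the caller must later drop,
following the lock-before-drop convention used in task.c (SOME_TASK_ID
and the ESRCH return are assumptions for the example, not part of this
header):

  task_t task = task_id_get_task (some_task_id);
  if (! task)
    return ESRCH;   /* No task is registered under that ID.  */

  /* ... operate on TASK ... */

  /* Release the reference acquired by task_id_get_task.  */
  hurd_cap_obj_t obj = hurd_cap_obj_from_user (task_t, task);
  hurd_cap_obj_lock (obj);
  hurd_cap_obj_drop (obj);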
-
-/* Enter the task TASK under its ID into the hash table, consuming one
- reference. Mainly used by the bootstrap functions. */
-error_t task_id_enter (task_t task);
-
-/* Find a free task ID, enter the task TASK (which must not be locked)
-   into the hash table under this ID, consuming one reference.  The new
- task ID is returned in TASK_ID. If no free task ID is available,
- EAGAIN is returned. */
-error_t task_id_add (task_t task, hurd_task_id_t *task_id_p);
-
-#endif /* TASK_H */
diff --git a/task/thread.c b/task/thread.c
deleted file mode 100644
index b2b4e0e..0000000
--- a/task/thread.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* thread.c - Manage threads.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
- Written by Marcus Brinkmann.
-
- This file is part of the GNU Hurd.
-
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
-
- The GNU Hurd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-
-#if HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <assert.h>
-
-#include <l4.h>
-
-#include <hurd/slab.h>
-
-#include "task.h"
-
-
-/* Initialize the slab object pointed to by BUFFER. HOOK is as
- provided to hurd_slab_create. */
-static error_t
-thread_constructor (void *hook, void *buffer)
-{
- thread_t thread = (thread_t) buffer;
-
- thread->next = NULL;
- thread->thread_id = l4_nilthread;
-
- return 0;
-}
-
-
-/* The slab space containing all thread objects. As this is the only
- place where we keep track of used and free thread IDs, it must
- never be reaped (so no destructor is needed). */
-static struct hurd_slab_space threads
- = HURD_SLAB_SPACE_INITIALIZER (struct thread, NULL, NULL,
- thread_constructor, NULL, NULL);
-
-/* The lock protecting the threads slab. */
-static pthread_mutex_t threads_lock = PTHREAD_MUTEX_INITIALIZER;
-
-
-/* The thread numbers are allocated sequentially starting from a first
- number and ending at a maximum number, which are set by
- thread_set_range. */
-static l4_thread_id_t next_thread_id = l4_nilthread;
-static l4_thread_id_t last_thread_id;
-
-
-/* Set the range of thread IDs that we are allowed to allocate. */
-void
-thread_set_range (l4_thread_id_t first, l4_thread_id_t last)
-{
- pthread_mutex_lock (&threads_lock);
- next_thread_id = first;
- last_thread_id = last;
- pthread_mutex_unlock (&threads_lock);
-}
-
-
-
-/* Allocate a new thread object with the thread ID THREAD_ID and
- return it in THREAD. Only used at bootstrap. */
-error_t
-thread_alloc_with_id (l4_thread_id_t thread_id, thread_t *r_thread)
-{
- error_t err;
- thread_t thread;
- union
- {
- void *buffer;
- thread_t thread;
- } u;
-
- pthread_mutex_lock (&threads_lock);
- err = hurd_slab_alloc (&threads, &u.buffer);
- thread = u.thread;
- if (!err)
- {
- assert (thread->thread_id == l4_nilthread);
-
- thread->thread_id = thread_id;
- }
- pthread_mutex_unlock (&threads_lock);
-
- *r_thread = thread;
- return err;
-}
-
-
-/* Allocate a new thread object and return it in THREAD. */
-error_t
-thread_alloc (thread_t *r_thread)
-{
- error_t err;
- thread_t thread;
- union
- {
- void *buffer;
- thread_t thread;
- } u;
-
- pthread_mutex_lock (&threads_lock);
- err = hurd_slab_alloc (&threads, &u.buffer);
- thread = u.thread;
- if (__builtin_expect (!err, 1))
- {
- if (__builtin_expect (thread->thread_id == l4_nilthread, 0))
- {
- if (__builtin_expect (next_thread_id == l4_nilthread, 0))
- err = EAGAIN;
- else
- {
- thread->thread_id = next_thread_id;
-
- if (__builtin_expect (next_thread_id == last_thread_id, 0))
- next_thread_id = l4_nilthread;
- else
- /* The version number is arbitrary here. */
- next_thread_id
- = l4_global_id (l4_thread_no (next_thread_id) + 1, 1);
- }
- }
- }
- pthread_mutex_unlock (&threads_lock);
-
- *r_thread = thread;
- return err;
-}
-
-
-/* Deallocate the thread THREAD. */
-void
-thread_dealloc (thread_t thread)
-{
- pthread_mutex_lock (&threads_lock);
-  hurd_slab_dealloc (&threads, thread);
- pthread_mutex_unlock (&threads_lock);
-}