path: root/libhurd-mm
author     Neal H. Walfield <neal@gnu.org>    2008-12-16 19:51:10 +0100
committer  Neal H. Walfield <neal@gnu.org>    2008-12-16 19:51:10 +0100
commit     86ae694912be3860f6a81bb86da118be73ace11f (patch)
tree       c8e2a9e26eb0d565f219f5aab59ea5ca1b835cd6 /libhurd-mm
parent     465d6fa4fcf836482088e5e263fc26da03feff44 (diff)
Replace L4 types and functions with standard types or viengoos functions.

hurd/
2008-12-16  Neal H. Walfield  <neal@gnu.org>

        * addr-trans.h: Don't include <l4/types.h>.  Include <stdint.h>
        and <hurd/math.h>.
        (struct cap_addr_trans): Replace l4 types with standard types.
        (CAP_ADDR_TRANS_GUARD): Likewise.
        (CAP_ADDR_TRANS_SET_GUARD_SUBPAGE): Use vg_msb, not l4_msb.
        (CAP_ADDR_TRANS_VALID): Replace l4 types with standard types.
        Use vg_msb, not l4_msb.
        * t-addr.c (main): Replace use of l4_msb64 with vg_msb64 and
        l4_lsb64 with vg_lsb64.
        * types.h: Don't include <l4/types.h>.

libhurd-mm/
2008-12-16  Neal H. Walfield  <neal@gnu.org>

        * anonymous.c (anonymous_pager_alloc): Replace use of l4_msb with
        vg_msb.
        * as-build.c (as_build): Replace use of l4_msb64 with vg_msb64.
        * as-dump.c (print_nr): Replace use of l4_int64_t with int64_t.
        * as-lookup.c (as_lookup_rel_internal): Replace use of l4_word_t
        with uintptr_t and l4_uint64_t with uint64_t.
        * as.h (as_alloc): Replace use of l4_uint64_t with uint64_t.
        (as_alloc_at): Likewise.
        (as_free): Likewise.
        (AS_CHECK_SHADOW): Replace use of l4_word_t with uintptr_t.
        (as_walk): Likewise.
        * as.c (struct region): Replace use of l4_uint64_t with uint64_t.
        (free_space_split): Likewise.
        (as_alloc): Likewise.
        (as_alloc_at): Likewise.
        (as_free): Likewise.
        (as_init): Likewise.
        (as_alloc_slow): Replace use of l4_word_t with uintptr_t.
        (as_init): Likewise.
        (as_walk): Likewise.
        (as_alloc): Replace use of l4_lsb64 with vg_lsb64.
        (as_init): Likewise.
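Note: the vg_* bit-scan helpers referenced above come from <hurd/math.h> (per the addr-trans.h change) and keep the semantics of their L4 counterparts: a 1-based index of the most or least significant set bit, and 0 for a zero argument (cf. the anonymous.c comment, vg_msb (4k * 2 - 1) - 1 = 12). The following is only an illustrative sketch of helpers with those semantics, assuming GCC builtins and guessed parameter types; it is not the actual <hurd/math.h> code.

/* Sketch only: 1-based index of the most/least significant set bit,
   0 for a zero argument.  These are the semantics the call sites in
   this patch rely on; the real definitions live in <hurd/math.h>.  */
#include <stdint.h>

static inline int
vg_msb64 (uint64_t w)
{
  return w ? 64 - __builtin_clzll (w) : 0;
}

static inline int
vg_lsb64 (uint64_t w)
{
  return w ? __builtin_ctzll (w) + 1 : 0;
}

static inline int
vg_msb (uintptr_t w)
{
  return w ? 64 - __builtin_clzll ((uint64_t) w) : 0;
}

/* E.g. vg_msb (4096 * 2 - 1) == 13, so width = vg_msb (length * 2 - 1) - 1
   is 12 for a 4 KiB length, and in as_alloc, vg_lsb64 (count) - 1 is 0
   when COUNT is 1.  */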
Diffstat (limited to 'libhurd-mm')
-rw-r--r--   libhurd-mm/ChangeLog    25
-rw-r--r--   libhurd-mm/anonymous.c   4
-rw-r--r--   libhurd-mm/as-build.c    6
-rw-r--r--   libhurd-mm/as-dump.c     4
-rw-r--r--   libhurd-mm/as-lookup.c   4
-rw-r--r--   libhurd-mm/as.c         56
-rw-r--r--   libhurd-mm/as.h         10
-rw-r--r--   libhurd-mm/map.h         2
-rw-r--r--   libhurd-mm/mprotect.c    2
-rw-r--r--   libhurd-mm/storage.c    77
10 files changed, 131 insertions(+), 59 deletions(-)
diff --git a/libhurd-mm/ChangeLog b/libhurd-mm/ChangeLog
index 56d2842..05008d3 100644
--- a/libhurd-mm/ChangeLog
+++ b/libhurd-mm/ChangeLog
@@ -1,3 +1,28 @@
+2008-12-16 Neal H. Walfield <neal@gnu.org>
+
+ * anonymous.c (anonymous_pager_alloc): Replace use of l4_msb with
+ vg_msb.
+ * as-build.c (as_build): Replace use of l4_msb64 with vg_msb64.
+ * as-dump.c (print_nr): Replace use of l4_int64_t with int64_t.
+ * as-lookup.c (as_lookup_rel_internal): Replace use of l4_word_t
+ with uintptr_t and l4_uint64_t with uint64_t.
+ * as.h (as_alloc): Replace use of l4_uint64_t with uint64_t.
+ (as_alloc_at): Likewise.
+ (as_free): Likewise.
+ (AS_CHECK_SHADOW): Replace use of l4_word_t with uintptr_t.
+ (as_walk): Likewise.
+ * as.c (struct region): Replace use of l4_uint64_t with uint64_t.
+ (free_space_split): Likewise.
+ (as_alloc): Likewise.
+ (as_alloc_at): Likewise.
+ (as_free): Likewise.
+ (as_init): Likewise.
+ (as_alloc_slow): Replace use of l4_word_t with uintptr_t.
+ (as_init): Likewise.
+ (as_walk): Likewise.
+ (as_alloc): Replace use of l4_lsb64 with vg_lsb64.
+ (as_init): Likewise.
+
2008-12-12 Neal H. Walfield <neal@gnu.org>
Update to new RPC interface and IPC semantics. Support messengers.
diff --git a/libhurd-mm/anonymous.c b/libhurd-mm/anonymous.c
index c679507..dc5a78b 100644
--- a/libhurd-mm/anonymous.c
+++ b/libhurd-mm/anonymous.c
@@ -632,8 +632,8 @@ anonymous_pager_alloc (addr_t activity,
assert (fill);
count = 1;
- /* e.g., l4_msb (4k * 2 - 1) - 1 = 12. */
- width = l4_msb (length * 2 - 1) - 1;
+ /* e.g., vg_msb (4k * 2 - 1) - 1 = 12. */
+ width = vg_msb (length * 2 - 1) - 1;
if (hint)
/* We will allocate a region whose size is 1 << WIDTH. This
diff --git a/libhurd-mm/as-build.c b/libhurd-mm/as-build.c
index a44f172..728183b 100644
--- a/libhurd-mm/as-build.c
+++ b/libhurd-mm/as-build.c
@@ -356,7 +356,7 @@ ID (as_build) (activity_t activity,
if (! need_pivot)
/* The slot is available. */
{
- int space = l4_msb64 (extract_bits64 (prefix, 0, remaining));
+ int space = vg_msb64 (extract_bits64 (prefix, 0, remaining));
if (space <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* The remaining bits to translate fit in the
guard, we are done. */
@@ -377,14 +377,14 @@ ID (as_build) (activity_t activity,
if (remaining < pte_gbits)
a >>= pte_gbits - remaining;
- gbits = max - l4_msb64 (a ^ b);
+ gbits = max - vg_msb64 (a ^ b);
tilobject = pte_gbits;
}
/* Make sure that the guard to use fits in the guard
area. */
- int firstset = l4_msb64 (extract_bits64_inv (prefix,
+ int firstset = vg_msb64 (extract_bits64_inv (prefix,
remaining - 1, gbits));
if (firstset > CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* FIRSTSET is the first (most significant) non-zero guard
diff --git a/libhurd-mm/as-dump.c b/libhurd-mm/as-dump.c
index 27dfc6d..f0f8e1f 100644
--- a/libhurd-mm/as-dump.c
+++ b/libhurd-mm/as-dump.c
@@ -33,13 +33,13 @@
#endif
static void
-print_nr (int width, l4_int64_t nr, bool hex)
+print_nr (int width, int64_t nr, bool hex)
{
int base = 10;
if (hex)
base = 16;
- l4_int64_t v = nr;
+ int64_t v = nr;
int w = 0;
if (v < 0)
{
diff --git a/libhurd-mm/as-lookup.c b/libhurd-mm/as-lookup.c
index 62343dd..ca8fb30 100644
--- a/libhurd-mm/as-lookup.c
+++ b/libhurd-mm/as-lookup.c
@@ -74,8 +74,8 @@ as_lookup_rel_internal (activity_t activity,
#endif
root = start;
- l4_uint64_t addr = addr_prefix (address);
- l4_word_t remaining = addr_depth (address);
+ uint64_t addr = addr_prefix (address);
+ uintptr_t remaining = addr_depth (address);
/* The code below assumes that the REMAINING significant bits are in the
lower bits, not upper. */
addr >>= (ADDR_BITS - remaining);
diff --git a/libhurd-mm/as.c b/libhurd-mm/as.c
index b7643ab..4daed0c 100644
--- a/libhurd-mm/as.c
+++ b/libhurd-mm/as.c
@@ -59,8 +59,8 @@ l4_thread_id_t as_rwlock_owner;
O(number of allocated regions). */
struct region
{
- l4_uint64_t start;
- l4_uint64_t end;
+ uint64_t start;
+ uint64_t end;
};
struct free_space
@@ -135,7 +135,7 @@ free_space_desc_free (struct free_space *free_space)
END is completely covered by the free region F. Carve it out of
F. */
static void
-free_space_split (struct free_space *f, l4_uint64_t start, l4_uint64_t end)
+free_space_split (struct free_space *f, uint64_t start, uint64_t end)
{
assert (! ss_mutex_trylock (&free_spaces_lock));
@@ -172,12 +172,12 @@ free_space_split (struct free_space *f, l4_uint64_t start, l4_uint64_t end)
}
addr_t
-as_alloc (int width, l4_uint64_t count, bool data_mappable)
+as_alloc (int width, uint64_t count, bool data_mappable)
{
assert (as_init_done);
assert (count);
- int shift = l4_lsb64 (count) - 1;
+ int shift = vg_lsb64 (count) - 1;
int w = width + shift;
count >>= shift;
if (! data_mappable)
@@ -195,8 +195,8 @@ as_alloc (int width, l4_uint64_t count, bool data_mappable)
- ((w - PAGESIZE_LOG2) % CAPPAGE_SLOTS_LOG2));
}
- l4_uint64_t align = 1ULL << w;
- l4_uint64_t length = align * count;
+ uint64_t align = 1ULL << w;
+ uint64_t length = align * count;
ss_mutex_lock (&free_spaces_lock);
@@ -207,7 +207,7 @@ as_alloc (int width, l4_uint64_t count, bool data_mappable)
free_space;
free_space = hurd_btree_free_space_next (free_space))
{
- l4_uint64_t start;
+ uint64_t start;
start = (free_space->region.start + align - 1) & ~(align - 1);
if (start < free_space->region.end
@@ -234,11 +234,11 @@ as_alloc (int width, l4_uint64_t count, bool data_mappable)
}
bool
-as_alloc_at (addr_t addr, l4_uint64_t count)
+as_alloc_at (addr_t addr, uint64_t count)
{
- l4_uint64_t start = addr_prefix (addr);
- l4_uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
- l4_uint64_t end = start + length - 1;
+ uint64_t start = addr_prefix (addr);
+ uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
+ uint64_t end = start + length - 1;
struct region region = { start, end };
struct free_space *f;
@@ -259,11 +259,11 @@ as_alloc_at (addr_t addr, l4_uint64_t count)
}
void
-as_free (addr_t addr, l4_uint64_t count)
+as_free (addr_t addr, uint64_t count)
{
- l4_uint64_t start = addr_prefix (addr);
- l4_uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
- l4_uint64_t end = start + length - 1;
+ uint64_t start = addr_prefix (addr);
+ uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
+ uint64_t end = start + length - 1;
struct free_space *space = free_space_desc_alloc ();
/* We prefer to coalesce regions where possible. This ensures that
@@ -377,7 +377,7 @@ as_alloc_slow (int width)
addr_t slot = ADDR_VOID;
int find_free_slot (addr_t addr,
- l4_word_t type, struct cap_properties properties,
+ uintptr_t type, struct cap_properties properties,
bool writable,
void *cookie)
{
@@ -393,8 +393,8 @@ as_alloc_slow (int width)
if (! writable)
return 0;
- l4_uint64_t start = addr_prefix (addr);
- l4_uint64_t end = start + (1 << width) - 1;
+ uint64_t start = addr_prefix (addr);
+ uint64_t end = start + (1 << width) - 1;
if (end >= DATA_ADDR_MAX)
return 0;
@@ -445,7 +445,7 @@ as_alloc_slow (int width)
slot = addr_extend (slot, 0, gbits);
/* Fill in a descriptor. */
- assertx ((((l4_word_t) &desc_additional[0]) & (PAGESIZE - 1)) == 0,
+ assertx ((((uintptr_t) &desc_additional[0]) & (PAGESIZE - 1)) == 0,
"%p", &desc_additional[0]);
debug (5, "Allocating space for " ADDR_FMT
@@ -502,7 +502,7 @@ as_init (void)
debug (5, "Adding object " ADDR_FMT " (%s)",
ADDR_PRINTF (addr), cap_type_string (desc->type));
- l4_word_t type;
+ uintptr_t type;
struct cap_properties properties;
err = rm_cap_read (meta_data_activity, ADDR_VOID, addr,
&type, &properties);
@@ -585,7 +585,7 @@ as_init (void)
shadowed AS. */
/* Which depths have objects. */
- l4_uint64_t depths = 0;
+ uint64_t depths = 0;
struct hurd_object_desc *desc;
int i;
@@ -599,7 +599,7 @@ as_init (void)
while (depths)
{
- int depth = l4_lsb64 (depths) - 1;
+ int depth = vg_lsb64 (depths) - 1;
depths &= ~(1ULL << depth);
for (i = 0, desc = &__hurd_startup_data->descs[0];
@@ -646,7 +646,7 @@ as_init (void)
/* Walk the address space the hard way and make sure that we've got
everything. */
int visit (addr_t addr,
- l4_word_t type, struct cap_properties properties,
+ uintptr_t type, struct cap_properties properties,
bool writable, void *cookie)
{
debug (5, "Checking that " ADDR_FMT " is a %s",
@@ -727,7 +727,7 @@ as_alloced_dump (const char *prefix)
value is returned. If the walk is not aborted, 0 is returned. */
int
as_walk (int (*visit) (addr_t addr,
- l4_word_t type, struct cap_properties properties,
+ uintptr_t type, struct cap_properties properties,
bool writable,
void *cookie),
int types,
@@ -750,12 +750,12 @@ as_walk (int (*visit) (addr_t addr,
error_t err;
struct cap_properties properties;
- l4_word_t type;
+ uintptr_t type;
/* Just caching the root capability cuts the number of RPCs by
about 25%. */
struct cap_properties root_properties;
- l4_word_t root_type;
+ uintptr_t root_type;
err = rm_cap_read (meta_data_activity, ADDR_VOID,
ADDR (0, 0), &root_type, &root_properties);
@@ -930,7 +930,7 @@ as_walk (int (*visit) (addr_t addr,
/* We have the shadow page tables and presumably a normal stack. */
int do_walk (struct cap *cap, addr_t addr, bool writable)
{
- l4_word_t type;
+ uintptr_t type;
struct cap_properties cap_properties;
type = cap->type;
diff --git a/libhurd-mm/as.h b/libhurd-mm/as.h
index 4b0a448..1e281b6 100644
--- a/libhurd-mm/as.h
+++ b/libhurd-mm/as.h
@@ -39,7 +39,7 @@
ensures that the leaves of each subtree are mappable in the region
accessible to data instructions. On success returns the address of
the first subtree. Otherwise, returns ADDR_VOID. */
-extern addr_t as_alloc (int width, l4_uint64_t count,
+extern addr_t as_alloc (int width, uint64_t count,
bool data_mappable);
/* Like as_alloc but may be called before as_init is called. Address
@@ -49,14 +49,14 @@ extern struct hurd_object_desc *as_alloc_slow (int width);
/* Allocate the COUNT contiguous addresses strating at address ADDR.
Returns true on success, false otherwise. */
-extern bool as_alloc_at (addr_t addr, l4_uint64_t count);
+extern bool as_alloc_at (addr_t addr, uint64_t count);
/* Free the COUNT contiguous addresses starting at ADDR. Each ADDR
must have been previously returned by a call to as_chunk_alloc or
as_region_alloc. All address returned by a call to as_chunk_alloc
or as_region_alloc need not be freed by a single call to
as_free. */
-extern void as_free (addr_t addr, l4_uint64_t count);
+extern void as_free (addr_t addr, uint64_t count);
/* Whether as_init has completed. */
extern bool as_init_done;
@@ -200,7 +200,7 @@ extern struct cap shadow_root;
__acs_code) \
do \
{ \
- l4_word_t __acs_type = -1; \
+ uintptr_t __acs_type = -1; \
struct cap_properties __acs_p; \
error_t __acs_err; \
\
@@ -656,7 +656,7 @@ as_dump_path (addr_t addr)
returns a non-zero value, the walk is aborted and that value is
returned. If the walk is not aborted, 0 is returned. */
extern int as_walk (int (*visit) (addr_t cap,
- l4_word_t type,
+ uintptr_t type,
struct cap_properties properties,
bool writable,
void *cookie),
diff --git a/libhurd-mm/map.h b/libhurd-mm/map.h
index febc3ea..382a6e9 100644
--- a/libhurd-mm/map.h
+++ b/libhurd-mm/map.h
@@ -206,7 +206,7 @@ map_find (struct region region)
struct map *map = hurd_btree_map_find (&maps, &region);
if (! map)
{
- debug (3, "No map covers %x-%x",
+ debug (5, "No map covers %x-%x",
region.start, region.start + region.length - 1);
return NULL;
}
diff --git a/libhurd-mm/mprotect.c b/libhurd-mm/mprotect.c
index 003feba..10cf1de 100644
--- a/libhurd-mm/mprotect.c
+++ b/libhurd-mm/mprotect.c
@@ -34,7 +34,7 @@ mprotect (void *addr, size_t length, int prot)
uintptr_t start = (uintptr_t) addr;
uintptr_t end = start + length - 1;
- debug (5, "(%p, %x (%p),%s%s)", addr, length, end,
+ debug (5, "(%p, %x (%p),%s%s)", addr, length, (void *) end,
prot == 0 ? " PROT_NONE" : (prot & PROT_READ ? " PROT_READ" : ""),
prot & PROT_WRITE ? " PROT_WRITE" : "");
diff --git a/libhurd-mm/storage.c b/libhurd-mm/storage.c
index cfdb6ed..89afb7e 100644
--- a/libhurd-mm/storage.c
+++ b/libhurd-mm/storage.c
@@ -72,7 +72,9 @@ struct storage_desc
/* Protects all members above here. This lock may be taken if
STORAGE_DESCS_LOCK is held. */
ss_mutex_t lock;
-
+#ifndef NDEBUG
+ l4_thread_id_t owner;
+#endif
/* Each storage area is stored in a btree keyed by the address of
the folio. Protected by STORAGE_DESCS_LOCK. */
@@ -269,12 +271,19 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
ss_mutex_lock (&storage_descs_lock);
ss_mutex_lock (&desc->lock);
+#ifndef NDEBUG
+ assert (desc->owner == l4_nilthread);
+ desc->owner = l4_myself ();
+#endif
/* DESC->FREE may be zero if someone came along and deallocated
a page between our dropping and retaking the lock. */
if (desc->free == 0)
list_unlink (desc);
+#ifndef NDEBUG
+ desc->owner = l4_nilthread;
+#endif
ss_mutex_unlock (&desc->lock);
ss_mutex_unlock (&storage_descs_lock);
}
@@ -325,14 +334,25 @@ storage_shadow_setup (struct cap *cap, addr_t folio)
static bool storage_init_done;
+static int
+num_threads (void)
+{
+ extern int __pthread_num_threads __attribute__ ((weak));
+
+ if (&__pthread_num_threads)
+ return __pthread_num_threads;
+ else
+ return 1;
+}
+
/* The minimum number of pages that should be available. This should
probably be per-thread (or at least per-CPU). */
-#define FREE_PAGES_LOW_WATER 64
+#define FREE_PAGES_LOW_WATER (32 + 16 * num_threads ())
/* If the number of free pages drops below this amount, the we might
soon have a problem. In this case, we serialize access to the pool
of available pages to allow some thread that is able to allocate
more pages the chance to do so. */
-#define FREE_PAGES_SERIALIZE 32
+#define FREE_PAGES_SERIALIZE (16 + 8 * num_threads ())
static pthread_mutex_t storage_low_mutex
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
@@ -531,11 +551,13 @@ storage_alloc (addr_t activity,
int tries = 0;
do
{
- if (tries ++ == 5)
+ if (++ tries == 5)
{
backtrace_print ();
- debug (0, "Failing to get storage (free count: %d). Live lock?",
- free_count);
+ debug (0, "Failing to get storage (free count: %d). Live lock? "
+ "(%p; %p; %p)",
+ free_count,
+ short_lived, long_lived_allocing, long_lived_freeing);
}
storage_check_reserve_internal (do_allocate, meta_data_activity,
@@ -551,7 +573,17 @@ storage_alloc (addr_t activity,
lead to allocating additional storage areas, however, this
should be proportional to the contention. */
if (ss_mutex_trylock (&list->lock))
- return list;
+ {
+#ifndef NDEBUG
+ assert (list->owner == l4_nilthread);
+ list->owner = l4_myself ();
+#endif
+ return list;
+ }
+
+ if (tries == 5)
+ debug (0, ADDR_FMT " locked by %x; contains %d free objects",
+ ADDR_PRINTF (list->folio), list->owner, list->free);
list = list->next;
}
@@ -634,14 +666,6 @@ storage_alloc (addr_t activity,
ss_mutex_unlock (&storage_descs_lock);
}
- addr_t a = addr;
- error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
- &a, NULL);
- assertx (! err,
- "Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
- idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
- assert (ADDR_EQ (a, addr));
-
struct object *shadow = desc->shadow;
struct cap *cap = NULL;
if (likely (!! shadow))
@@ -654,8 +678,20 @@ storage_alloc (addr_t activity,
assert (! as_init_done);
/* We drop DESC->LOCK. */
+#ifndef NDEBUG
+ assert (desc->owner == l4_myself ());
+ desc->owner = l4_nilthread;
+#endif
ss_mutex_unlock (&desc->lock);
+ addr_t a = addr;
+ error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
+ &a, NULL);
+ assertx (! err,
+ "Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
+ idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
+ assert (ADDR_EQ (a, addr));
+
if (! ADDR_IS_VOID (addr))
/* We also have to update the shadow for ADDR. Unfortunately, we
don't have the cap although the caller might. */
@@ -716,6 +752,10 @@ storage_free_ (addr_t object, bool unmap_now)
ADDR_PRINTF (object));
ss_mutex_lock (&storage->lock);
+#ifndef NDEBUG
+ assert (storage->owner == l4_nilthread);
+ storage->owner = l4_myself ();
+#endif
storage->free ++;
@@ -813,6 +853,10 @@ storage_free_ (addr_t object, bool unmap_now)
else
assert (! as_init_done);
+#ifndef NDEBUG
+ assert (storage->owner == l4_myself ());
+ storage->owner = l4_nilthread;
+#endif
ss_mutex_unlock (&storage->lock);
}
@@ -861,6 +905,9 @@ storage_init (void)
sdesc = storage_desc_alloc ();
sdesc->lock = (ss_mutex_t) 0;
+#ifndef NDEBUG
+ sdesc->owner = l4_nilthread;
+#endif
sdesc->folio = folio;
sdesc->free = FOLIO_OBJECTS;
sdesc->mode = LONG_LIVED_ALLOCING;