author     Neal H. Walfield <neal@gnu.org>   2008-12-16 20:04:46 +0100
committer  Neal H. Walfield <neal@gnu.org>   2008-12-16 20:04:46 +0100
commit     1e723064be94fb07fab1d4c50eeab34e6dd29bd1
tree       c8e2a9e26eb0d565f219f5aab59ea5ca1b835cd6
parent     cd09ca0b681ca964dcee2c5b72311a33ca6f1cb1
parent     86ae694912be3860f6a81bb86da118be73ace11f
Merge branch 'master' of ssh://neal@git.sv.gnu.org/srv/git/hurd/viengoos
-rw-r--r--  libhurd-mm/storage.c | 77
1 file changed, 62 insertions(+), 15 deletions(-)
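The merged work is confined to the storage allocator. It adds a debug-only record of which thread holds each storage descriptor's lock, scales the free-page low-water marks with the number of live threads, fixes an off-by-one in the livelock diagnostic, and issues the folio-object allocation RPC after dropping the descriptor lock instead of while holding it.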
diff --git a/libhurd-mm/storage.c b/libhurd-mm/storage.c
index cfdb6ed..89afb7e 100644
--- a/libhurd-mm/storage.c
+++ b/libhurd-mm/storage.c
@@ -72,7 +72,9 @@ struct storage_desc
/* Protects all members above here. This lock may be taken if
STORAGE_DESCS_LOCK is held. */
ss_mutex_t lock;
-
+#ifndef NDEBUG
+ l4_thread_id_t owner;
+#endif
/* Each storage area is stored in a btree keyed by the address of
the folio. Protected by STORAGE_DESCS_LOCK. */
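The debug-only OWNER field added above records which thread currently holds DESC->LOCK, turning a silent deadlock or livelock into an assertion failure that names the culprit. A minimal sketch of the pattern, using illustrative names rather than the viengoos API:

  #include <assert.h>

  typedef unsigned long thread_id_t;      /* stand-in for l4_thread_id_t */
  #define NIL_THREAD ((thread_id_t) 0)    /* stand-in for l4_nilthread */

  struct desc
  {
    /* ss_mutex_t lock;  -- the real lock lives here.  */
  #ifndef NDEBUG
    thread_id_t owner;  /* Meaningful only while LOCK is held.  */
  #endif
  };

  static void
  note_locked (struct desc *d, thread_id_t self)
  {
    /* Called immediately after taking D->LOCK.  */
  #ifndef NDEBUG
    assert (d->owner == NIL_THREAD);  /* No one may already own us.  */
    d->owner = self;
  #endif
  }

  static void
  note_unlocking (struct desc *d, thread_id_t self)
  {
    /* Called immediately before releasing D->LOCK.  */
  #ifndef NDEBUG
    assert (d->owner == self);        /* Only the holder may unlock.  */
    d->owner = NIL_THREAD;
  #endif
  }

Because every store to OWNER happens while the lock is held, the field needs no synchronization of its own, and NDEBUG builds pay nothing for it.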
@@ -269,12 +271,19 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
ss_mutex_lock (&storage_descs_lock);
ss_mutex_lock (&desc->lock);
+#ifndef NDEBUG
+ assert (desc->owner == l4_nilthread);
+ desc->owner = l4_myself ();
+#endif
/* DESC->FREE may be zero if someone came along and deallocated
a page between our dropping and retaking the lock. */
if (desc->free == 0)
list_unlink (desc);
+#ifndef NDEBUG
+ desc->owner = l4_nilthread;
+#endif
ss_mutex_unlock (&desc->lock);
ss_mutex_unlock (&storage_descs_lock);
}
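Re-testing DESC->FREE after retaking the locks is the usual revalidate-after-relock step: anything observed while the locks were dropped may be stale, so the decision to unlink the descriptor is made only from state re-read under the locks.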
@@ -325,14 +334,25 @@ storage_shadow_setup (struct cap *cap, addr_t folio)
static bool storage_init_done;
+static int
+num_threads (void)
+{
+ extern int __pthread_num_threads __attribute__ ((weak));
+
+ if (&__pthread_num_threads)
+ return __pthread_num_threads;
+ else
+ return 1;
+}
+
/* The minimum number of pages that should be available. This should
probably be per-thread (or at least per-CPU). */
-#define FREE_PAGES_LOW_WATER 64
+#define FREE_PAGES_LOW_WATER (32 + 16 * num_threads ())
/* If the number of free pages drops below this amount, we might
soon have a problem. In this case, we serialize access to the pool
of available pages to allow some thread that is able to allocate
more pages the chance to do so. */
-#define FREE_PAGES_SERIALIZE 32
+#define FREE_PAGES_SERIALIZE (16 + 8 * num_threads ())
static pthread_mutex_t storage_low_mutex
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
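The weak attribute on __pthread_num_threads lets num_threads probe at run time whether libpthread was linked in: if nothing in the final image defines the symbol, the weak reference resolves to a null address and the single-threaded fallback is taken. A condensed sketch of this GNU C idiom, assuming a GCC-compatible toolchain:

  /* Weak declaration: the symbol's address is null unless a strong
     definition exists somewhere in the final image.  */
  extern int __pthread_num_threads __attribute__ ((weak));

  int
  thread_count (void)
  {
    if (&__pthread_num_threads)       /* Taking the address is safe.  */
      return __pthread_num_threads;   /* libpthread is linked in.  */
    return 1;                         /* Single-threaded program.  */
  }

The fixed low-water marks then become thread-scaled: with four threads, FREE_PAGES_LOW_WATER is 32 + 16 * 4 = 96 pages and FREE_PAGES_SERIALIZE is 16 + 8 * 4 = 48, while a single-threaded image, where num_threads () returns 1, gets 48 and 24 in place of the old fixed 64 and 32.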
@@ -531,11 +551,13 @@ storage_alloc (addr_t activity,
int tries = 0;
do
{
- if (tries ++ == 5)
+ if (++ tries == 5)
{
backtrace_print ();
- debug (0, "Failing to get storage (free count: %d). Live lock?",
- free_count);
+ debug (0, "Failing to get storage (free count: %d). Live lock? "
+ "(%p; %p; %p)",
+ free_count,
+ short_lived, long_lived_allocing, long_lived_freeing);
}
storage_check_reserve_internal (do_allocate, meta_data_activity,
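The change from TRIES ++ to ++ TRIES fixes an off-by-one: the post-increment form compares the value from before the increment, so the backtrace and debug message only appeared on the sixth pass through the retry loop. With the pre-increment, TRIES counts the attempts made so far, the diagnostic fires on the fifth, and it agrees with the TRIES == 5 test in the list-scanning loop below.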
@@ -551,7 +573,19 @@ storage_alloc (addr_t activity,
lead to allocating additional storage areas; however, this
should be proportional to the contention. */
if (ss_mutex_trylock (&list->lock))
- return list;
+ {
+#ifndef NDEBUG
+ assert (list->owner == l4_nilthread);
+ list->owner = l4_myself ();
+#endif
+ return list;
+ }
+
+#ifndef NDEBUG
+ if (tries == 5)
+ debug (0, ADDR_FMT " locked by %x; contains %d free objects",
+ ADDR_PRINTF (list->folio), list->owner, list->free);
+#endif
list = list->next;
}
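ss_mutex_trylock lets the scan skip a descriptor that is momentarily locked instead of blocking on it, so contention costs a retry rather than a wait. If the scan keeps failing, the fifth pass also reports, for each descriptor whose trylock fails, the holder recorded in OWNER; that report sits under #ifndef NDEBUG because the field does not exist in optimized builds.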
@@ -634,14 +666,6 @@ storage_alloc (addr_t activity,
ss_mutex_unlock (&storage_descs_lock);
}
- addr_t a = addr;
- error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
- &a, NULL);
- assertx (! err,
- "Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
- idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
- assert (ADDR_EQ (a, addr));
-
struct object *shadow = desc->shadow;
struct cap *cap = NULL;
if (likely (!! shadow))
@@ -654,8 +678,20 @@ storage_alloc (addr_t activity,
assert (! as_init_done);
/* We drop DESC->LOCK. */
+#ifndef NDEBUG
+ assert (desc->owner == l4_myself ());
+ desc->owner = l4_nilthread;
+#endif
ss_mutex_unlock (&desc->lock);
+ addr_t a = addr;
+ error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
+ &a, NULL);
+ assertx (! err,
+ "Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
+ idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
+ assert (ADDR_EQ (a, addr));
+
if (! ADDR_IS_VOID (addr))
/* We also have to update the shadow for ADDR. Unfortunately, we
don't have the cap although the caller might. */
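In the hunk above, the rm_folio_object_alloc call moves below ss_mutex_unlock, so DESC->LOCK is no longer held across a round trip to the resource manager; the lock now covers only the cheap bookkeeping that selects the object. A schematic of the reordering, using pthreads and invented helper names in place of the viengoos primitives:

  #include <pthread.h>

  struct desc { pthread_mutex_t lock; int next_free; };

  static int
  pick_free_slot (struct desc *d)   /* Cheap; must be serialized.  */
  {
    return d->next_free++;
  }

  static void
  allocate_object_rpc (int idx)     /* Stands in for the slow RPC.  */
  {
    (void) idx;
  }

  static void
  alloc_one (struct desc *d)
  {
    pthread_mutex_lock (&d->lock);
    int idx = pick_free_slot (d);
    pthread_mutex_unlock (&d->lock);

    /* The slow call runs with no locks held; other threads may
       allocate from D concurrently.  */
    allocate_object_rpc (idx);
  }

Keeping the RPC out of the critical section bounds how long other allocators can be blocked on the descriptor and removes one source of the livelock that the retry loop above guards against.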
@@ -716,6 +752,10 @@ storage_free_ (addr_t object, bool unmap_now)
ADDR_PRINTF (object));
ss_mutex_lock (&storage->lock);
+#ifndef NDEBUG
+ assert (storage->owner == l4_nilthread);
+ storage->owner = l4_myself ();
+#endif
storage->free ++;
@@ -813,6 +853,10 @@ storage_free_ (addr_t object, bool unmap_now)
else
assert (! as_init_done);
+#ifndef NDEBUG
+ assert (storage->owner == l4_myself ());
+ storage->owner = l4_nilthread;
+#endif
ss_mutex_unlock (&storage->lock);
}
@@ -861,6 +905,9 @@ storage_init (void)
sdesc = storage_desc_alloc ();
sdesc->lock = (ss_mutex_t) 0;
+#ifndef NDEBUG
+ sdesc->owner = l4_nilthread;
+#endif
sdesc->folio = folio;
sdesc->free = FOLIO_OBJECTS;
sdesc->mode = LONG_LIVED_ALLOCING;