author     Richard Braun <syn@sceen.net>    2007-07-10 00:30:58 +0000
committer  Richard Braun <syn@sceen.net>    2007-07-10 00:30:58 +0000
commit     7d5ea3bfbb3d1bae12c4be2fed262b8e044e3588 (patch)
tree       5cb8384e2d381369cc19bd39ada8b9d1325b970f
parent     5c50fe6384f5e9f29312e3b9afef5c2b58000c40 (diff)
Fixed bugs and deadlocks.
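
The locking is reordered so the global list mutexes are taken before the
per-cache lock instead of while it is held: cache_free() now grabs
reap_cache_list_mutex before cache_lock(), cache_reap() no longer touches the
reap list mutex itself, and cache_destroy() takes cache_list_mutex, then
reap_cache_list_mutex, then the cache lock. In outline (a sketch assembled
from the hunks below; it assumes cache_lock()/cache_unlock() wrap a per-cache
mutex and is not itself part of the patch):

    /* cache_free(): reap list mutex before the per-cache lock. */
    pthread_mutex_lock(&reap_cache_list_mutex);
    cache_lock(cache);
    /* ... return the object, possibly queue the cache for reaping ... */
    cache_unlock(cache);
    pthread_mutex_unlock(&reap_cache_list_mutex);

    /* cache_destroy(): both global mutexes, outermost first. */
    pthread_mutex_lock(&cache_list_mutex);
    pthread_mutex_lock(&reap_cache_list_mutex);
    cache_lock(cache);
    /* ... reap the slabs, then unlink and free the cache ... */
    cache_unlock(cache);
    pthread_mutex_unlock(&reap_cache_list_mutex);
    list_remove(&cache_list, cache, cache_link);
    pthread_mutex_unlock(&cache_list_mutex);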
-rw-r--r--  libtnttk/cache.c  35
1 files changed, 16 insertions, 19 deletions
diff --git a/libtnttk/cache.c b/libtnttk/cache.c
index 49fa6f5..d79e19c 100644
--- a/libtnttk/cache.c
+++ b/libtnttk/cache.c
@@ -322,9 +322,10 @@ cache_grow(cache_t cache)
 
 /*
  * Remove fully free slabs from the cache, destroy all objects within the
- * slab and release the underlying pages.
+ * slab and release the underlying pages. The given cache must be in the
+ * reap cache list.
  *
- * cache must be locked.
+ * The reap cache list and the given cache must be locked.
  */
 static void
 cache_reap(cache_t cache)
@@ -381,9 +382,7 @@ cache_reap(cache_t cache)
         assert(cache_alloc_consistent(cache));
     }
 
-    pthread_mutex_lock(&reap_cache_list_mutex);
     list_remove(&reap_cache_list, cache, reap_cache_link);
-    pthread_mutex_unlock(&reap_cache_list_mutex);
 }
 
 void *
@@ -437,6 +436,7 @@ cache_free(cache_t cache, void *address)
     bufctl_t bufctl;
     slab_t slab;
 
+    pthread_mutex_lock(&reap_cache_list_mutex);
     cache_lock(cache);
     assert(cache_alloc_consistent(cache));
     bufctl = address + cache->object_size;
@@ -447,16 +447,10 @@ cache_free(cache_t cache, void *address)
     cache->free_object_count++;
 
     /*
-     * Special case optimized.
-     */
-    if ((cache->alloc_slab == SLAB_NULL)
-        && (slab == list_last(&cache->slabs, slab_t)))
-        cache->alloc_slab = slab;
-
-    /*
      * This slab is now partially free. Keep the slab list sorted.
      */
-    else if (slab->ref_count == (cache->objects_per_slab - 1))
+    if ((slab->ref_count > 0)
+        && (slab->ref_count == (cache->objects_per_slab - 1)))
     {
         list_remove(&cache->slabs, slab, link);
 
@@ -484,16 +478,16 @@ cache_free(cache_t cache, void *address)
             list_insert_tail(&cache->slabs, slab, link);
         }
 
+        if (cache->alloc_slab == SLAB_NULL)
+            cache->alloc_slab = slab;
+
         if (list_link_null(cache, reap_cache_link))
-        {
-            pthread_mutex_lock(&reap_cache_list_mutex);
-            list_insert_tail(&reap_cache_list, cache, reap_cache_link);
-            pthread_mutex_unlock(&reap_cache_list_mutex);
-        }
+            list_insert_tail(&reap_cache_list, cache, reap_cache_link);
     }
 
     assert(cache_alloc_consistent(cache));
     cache_unlock(cache);
+    pthread_mutex_unlock(&reap_cache_list_mutex);
 }
 
 /*
@@ -556,6 +550,8 @@ cache_create(const char *name, size_t object_size,
 void
 cache_destroy(cache_t cache)
 {
+    pthread_mutex_lock(&cache_list_mutex);
+    pthread_mutex_lock(&reap_cache_list_mutex);
     cache_lock(cache);
     assert(cache_alloc_consistent(cache));
 
@@ -563,10 +559,11 @@ cache_destroy(cache_t cache)
     cache_reap(cache);
     assert(list_empty(&cache->slabs));
 
-    pthread_mutex_lock(&cache_list_mutex);
+    cache_unlock(cache);
+    pthread_mutex_unlock(&reap_cache_list_mutex);
     list_remove(&cache_list, cache, cache_link);
     pthread_mutex_unlock(&cache_list_mutex);
-    cache_unlock(cache);
+
    cache_free(cache_cache, cache);
 }
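
Note on the new cache_reap() contract (first hunk): the function no longer
locks reap_cache_list_mutex itself, so a caller must hold both the reap list
mutex and the cache lock, and the cache must already be linked on
reap_cache_list; cache_reap() unlinks it on its way out. A hedged sketch of a
conforming caller, modeled on the patched cache_destroy() (the
list_link_null() guard is illustrative, not taken from this diff):

    pthread_mutex_lock(&reap_cache_list_mutex);
    cache_lock(cache);

    /* Only reap caches that were queued for reaping by cache_free(). */
    if (!list_link_null(cache, reap_cache_link))
        cache_reap(cache); /* also removes the cache from reap_cache_list */

    cache_unlock(cache);
    pthread_mutex_unlock(&reap_cache_list_mutex);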