summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorneal <neal>2008-01-04 15:48:04 +0000
committerneal <neal>2008-01-04 15:48:04 +0000
commit4ecdfe1b47c4b27b47c6b41bdcd0c8d2b17917e1 (patch)
treef22ab423a47680741f8e25eca2a28cdb111f0298
parent60a852e0afb70f213f5d7cd9f08bd9e7de9d354b (diff)
2008-01-04 Neal H. Walfield <neal@gnu.org>
* activity.h (struct activity): Add fields eviction_clean and eviction_dirty. Improve comments. (activity_charge): Change asserts to account for unsignedness. * activity.c (activity_destroy): Move all objects owned by VICTIM to its parent. (activity_deprepare): Add additional asserts. (do_activity_dump): Don't assert that ACTIVITY->FRAMES_LOCAL is the sum of the items on ACTIVITY's LRU lists. * object.h (struct object_desc): Add fields eviction_candidate, live, laundry_node and available_node. Make priority_node a union with activity_node, which replaces activity_lru. Remove field global_lru. (object_activity_lru): Rename this list class... (activity_lru): ... to this. Update users. (object_global_lru): Don't generate this list class. (eviction): Generate new list class. (available): Likewise. (laundry): Likewise. (global_active): Remove declaration. (global_inactive_dirty): Likewise. (global_inactive_clean): Likewise. (disowned): Likewise. (laundry): New declaration. (available): Likewise. (memory_object_destroy): Likewise. (object_desc_disown_simple): Remove declaration. (object_disown_simple): Remove function. (object_desc_disown): Likewise. (object_disown): Likewise. (object_desc_claim): Take additional parameter update_accounting. Update users. (object_claim): Likewise. (object_desc_unmap): New function. (object_age): Likewise. (objects_folio_offset): Likewise. (objects_folio): Likewise. (object_free): Implement in terms of the above two functions. * object.c (global_active): Remove variable. (global_inactive_dirty): Likewise. (global_inactive_clean): Likewise. (disowned): Likewise. (laundry): New variable. (available): Likewise. (memory_object_alloc): Initialize ODESC to 0. Call object_desc_claim to attach it to the relevant lists. Assert that ODESC->LIVE is 0. Set ODESC->LIVE to 1. (memory_object_destroy): Remove static qualifier. Require that LRU_LOCK be held on entry. Update users. 
Use object_desc_claim to disconnect DESC from any lists to which it is attached. Don't call memory_frame_free, that is now the caller's responsibility. Update users. Set DESC->LIVE to 0. (folio_free): Don't disown the folio header. (folio_object_alloc): Call memory_frame_free to really free the memory. (object_desc_disown_simple): Remove function. (object_desc_disown_): Likewise. (object_desc_claim): Take additional parameter update_accounting. If true, update the relevant activities' accounting information. Update connect and disconnect code. Only add an object to one of the priority tree and the lru lists, but not both. * viengoos.c (system_task_load): After allocating the root activity, have the root activity claim it and FOLIO. * ager.c: Include "zalloc.h". (AGE_DELTA): Don't define. (ager_loop): Rewrite to walk the object descriptors sequentially rather than following a linked list. Update object list connection and disconnection code. * pager.h: New file. * pager.c: Likewise. * Makefile.am (viengoos_SOURCES): Add pager.h and pager.c. * memory.h (struct activity): Add forward. (memory_frame_allocate): Take additional parameter activity. Return a uintptr_t instead of an l4_word_t. Update users. * memory.c: Include "pager.h" and "activity.h". (memory_grab): Always get base page sized pages. (memory_frame_allocate): Take additional parameter activity. Return a uintptr_t instead of an l4_word_t. If zalloc fails, check AVAILABLE_LIST. If nothing is applicable, call pager_collect and try again. * t-environment.h (pager_collect): New function.
-rw-r--r--viengoos/ChangeLog83
-rw-r--r--viengoos/Makefile.am1
-rw-r--r--viengoos/activity.c167
-rw-r--r--viengoos/activity.h39
-rw-r--r--viengoos/ager.c234
-rw-r--r--viengoos/memory.c76
-rw-r--r--viengoos/memory.h11
-rw-r--r--viengoos/object.c244
-rw-r--r--viengoos/object.h255
-rw-r--r--viengoos/pager.c373
-rw-r--r--viengoos/pager.h22
-rw-r--r--viengoos/t-environment.h6
-rw-r--r--viengoos/viengoos.c9
13 files changed, 1087 insertions, 433 deletions
diff --git a/viengoos/ChangeLog b/viengoos/ChangeLog
index 7b3de11..306a204 100644
--- a/viengoos/ChangeLog
+++ b/viengoos/ChangeLog
@@ -1,5 +1,88 @@
2008-01-04 Neal H. Walfield <neal@gnu.org>
+ * activity.h (struct activity): Add fields eviction_clean and
+ eviction_dirty. Improve comments.
+ (activity_charge): Change asserts to account for unsignedness.
+ * activity.c (activity_destroy): Move all objects owned by VICTIM
+ to its parent.
+ (activity_deprepare): Add additional asserts.
+ (do_activity_dump): Don't assert that ACTIVITY->FRAMES_LOCAL is
+ the sum of the items on ACTIVITY's LRU lists.
+ * object.h (struct object_desc): Add fields eviction_candidate,
+ live, laundry_node and available_node. Make priority_node a union
+ with activity_node, which replaces activity_lru. Remove field
+ global_lru.
+ (object_activity_lru): Rename this list class...
+ (activity_lru): ... to this. Update users.
+ (object_global_lru): Don't generate this list class.
+ (eviction): Generate new list class.
+ (available): Likewise.
+ (laundry): Likewise.
+ (global_active): Remove declaration.
+ (global_inactive_dirty): Likewise.
+ (global_inactive_clean): Likewise.
+ (disowned): Likewise.
+ (laundry): New declaration.
+ (available): Likewise.
+ (memory_object_destroy): Likewise.
+ (object_desc_disown_simple): Remove declaration.
+ (object_disown_simple): Remove function.
+ (object_desc_disown): Likewise.
+ (object_disown): Likewise.
+ (object_desc_claim): Take additional parameter update_accounting.
+ Update users.
+ (object_claim): Likewise.
+ (object_desc_unmap): New function.
+ (object_age): Likewise.
+ (objects_folio_offset): Likewise.
+ (objects_folio): Likewise.
+ (object_free): Implement in terms of the above two functions.
+ * object.c (global_active): Remove variable.
+ (global_inactive_dirty): Likewise.
+ (global_inactive_clean): Likewise.
+ (disowned): Likewise.
+ (laundry): New variable.
+ (available): Likewise.
+ (memory_object_alloc): Initialize ODESC to 0. Call
+ object_desc_claim to attach it to the relevant lists. Assert that
+ ODESC->LIVE is 0. Set ODESC->LIVE to 1.
+ (memory_object_destroy): Remove static qualifier. Require that
+ LRU_LOCK be held on entry. Update users. Use object_desc_claim
+ to disconnect DESC from any lists to which it is attached. Don't
+ call memory_frame_free, that is now the caller's responsibility.
+ Update users. Set DESC->LIVE to 0.
+ (folio_free): Don't disown the folio header.
+ (folio_object_alloc): Call memory_frame_free to really free the
+ memory.
+ (object_desc_disown_simple): Remove function.
+ (object_desc_disown_): Likewise.
+ (object_desc_claim): Take additional parameter update_accounting.
+ If true, update the relevant activities' accounting information.
+ Update connect and disconnect code. Only add an object to one of
+ the priority tree and the lru lists, but not both.
+ * viengoos.c (system_task_load): After allocating the
+ root activity, have the root activity claim it and FOLIO.
+ * ager.c: Include "zalloc.h".
+ (AGE_DELTA): Don't define.
+ (ager_loop): Rewrite to walk the object descriptors sequentially
+ rather than following a linked list. Update object list
+ connection and disconnection code.
+ * pager.h: New file.
+ * pager.c: Likewise.
+ * Makefile.am (viengoos_SOURCES): Add pager.h and pager.c.
+ * memory.h (struct activity): Add forward.
+ (memory_frame_allocate): Take additional parameter activity.
+ Return a uintptr_t instead of an l4_word_t. Update users.
+ * memory.c: Include "pager.h" and "activity.h".
+ (memory_grab): Always get base page sized pages.
+ (memory_frame_allocate): Take additional parameter activity.
+ Return a uintptr_t instead of an l4_word_t. If zalloc fails,
+ check AVAILABLE_LIST. If nothing is applicable, call
+ pager_collect and try again.
+ * t-environment.h (pager_collect): New function.
+
+2008-01-04 Neal H. Walfield <neal@gnu.org>
+
* memory.c (memory_add): Add to MEMORY_TOTAL here.
(memory_grab): Never here.
diff --git a/viengoos/Makefile.am b/viengoos/Makefile.am
index 77c36df..55c8cfd 100644
--- a/viengoos/Makefile.am
+++ b/viengoos/Makefile.am
@@ -51,6 +51,7 @@ viengoos_SOURCES = $(ARCH_SOURCES) \
bits.h \
elf.h loader.h loader.c \
server.h server.c \
+ pager.h pager.c \
list.h
# Doug Lea's malloc is included by malloc-wrap.c.
diff --git a/viengoos/activity.c b/viengoos/activity.c
index f02b128..2838a92 100644
--- a/viengoos/activity.c
+++ b/viengoos/activity.c
@@ -114,47 +114,123 @@ activity_destroy (struct activity *activity, struct activity *victim)
object_free (activity, o);
}
- /* Disown all allocated memory objects. */
- ss_mutex_lock (&lru_lock);
- struct object_desc *desc;
- int count = 0;
- while ((desc = object_activity_lru_list_head (&victim->active)))
- {
- object_desc_disown_simple (desc);
- count ++;
- }
- while ((desc = object_activity_lru_list_head (&victim->inactive_clean)))
- {
- object_desc_disown_simple (desc);
- count ++;
- }
- while ((desc = object_activity_lru_list_head (&victim->inactive_dirty)))
- {
- object_desc_disown_simple (desc);
- count ++;
- }
- ss_mutex_unlock (&lru_lock);
+ /* VICTIM->PARENT inherits all of VICTIM's objects. */
+ {
+ ss_mutex_lock (&lru_lock);
+
+ struct object_desc *desc;
+ int count = 0;
+
+ /* Make ACTIVE objects inactive. */
+ while ((desc = activity_lru_list_head (&victim->active)))
+ {
+ assert (! desc->eviction_candidate);
+ assert (desc->activity == victim);
+
+ activity_lru_list_unlink (&victim->active, desc);
+
+ if (desc->dirty && ! desc->policy.discardable)
+ activity_lru_list_queue (&victim->parent->inactive_dirty, desc);
+ else
+ activity_lru_list_queue (&victim->parent->inactive_clean, desc);
+
+ desc->activity = victim->parent;
+ count ++;
+ }
+
+ struct object_desc *next = hurd_btree_priorities_first (&victim->priorities);
+ while ((desc = next))
+ {
+ assert (! desc->eviction_candidate);
+ assert (desc->activity == victim);
- activity_charge (victim, -count);
+ next = hurd_btree_priorities_next (desc);
- do_debug (1)
- if (victim->frames_total != 0)
+ if (desc->dirty && ! desc->policy.discardable)
+ activity_lru_list_queue (&victim->parent->inactive_dirty, desc);
+ else
+ activity_lru_list_queue (&victim->parent->inactive_clean, desc);
+
+ desc->activity = victim->parent;
+ count ++;
+ }
+#ifndef NDEBUG
+ hurd_btree_priorities_tree_init (&victim->priorities);
+#endif
+
+ /* Move inactive objects to the head of VICTIM->PARENT's appropriate
+ inactive list (thereby making them the first eviction
+ candidates). */
+ for (desc = activity_lru_list_head (&victim->inactive_clean);
+ desc; desc = activity_lru_list_next (desc))
{
- debug (0, "activity (%llx)->frames_total = %d",
- object_to_object_desc ((struct object *) victim)->oid,
- victim->frames_total);
- activity_dump (root_activity);
-
- struct object_desc *desc;
- ss_mutex_lock (&lru_lock);
- for (desc = object_activity_lru_list_head (&victim->active);
- desc; desc = object_activity_lru_list_next (desc))
- debug (0, " %llx: %s", desc->oid, cap_type_string (desc->type));
- ss_mutex_unlock (&lru_lock);
+ assert (! desc->eviction_candidate);
+ assert (desc->activity == victim);
+ assert (! desc->dirty || desc->policy.discardable);
+ assert (! list_node_attached (&desc->laundry_node));
+
+ desc->activity = victim->parent;
+ count ++;
}
- assert (victim->frames_total == 0);
- assert (victim->frames_local == 0);
- assert (victim->folio_count == 0);
+ activity_lru_list_join (&victim->parent->inactive_clean,
+ &victim->inactive_clean);
+
+ for (desc = activity_lru_list_head (&victim->inactive_dirty);
+ desc; desc = activity_lru_list_next (desc))
+ {
+ assert (! desc->eviction_candidate);
+ assert (desc->activity == victim);
+ assert (desc->dirty && ! desc->policy.discardable);
+
+ desc->activity = victim->parent;
+ count ++;
+ }
+ activity_lru_list_join (&victim->parent->inactive_dirty,
+ &victim->inactive_dirty);
+
+
+ /* And move all of VICTIM's eviction candidates to VICTIM->PARENT's
+ eviction lists. */
+ while ((desc = eviction_list_head (&victim->eviction_clean)))
+ {
+ assert (desc->eviction_candidate);
+ assert (desc->activity == victim);
+ assert (! list_node_attached (&desc->laundry_node));
+ assert (! desc->dirty || desc->policy.discardable);
+
+ desc->activity = victim->parent;
+ }
+ eviction_list_join (&victim->parent->eviction_clean,
+ &victim->eviction_clean);
+
+ while ((desc = eviction_list_head (&victim->eviction_dirty)))
+ {
+ assert (desc->eviction_candidate);
+ assert (desc->activity == victim);
+ assert (list_node_attached (&desc->laundry_node));
+ assert (desc->dirty && !desc->policy.discardable);
+
+ desc->activity = victim->parent;
+ }
+ eviction_list_join (&victim->parent->eviction_dirty,
+ &victim->eviction_dirty);
+
+ ss_mutex_unlock (&lru_lock);
+
+ /* Adjust the counting information. */
+ do_debug (1)
+ if (victim->frames_total != count || victim->frames_local != count)
+ {
+ debug (0, "activity (%llx), total = %d, local: %d, count: %d",
+ object_to_object_desc ((struct object *) victim)->oid,
+ victim->frames_total, victim->frames_local, count);
+ activity_dump (root_activity);
+ }
+ assert (count == victim->frames_local);
+ assert (count == victim->frames_total);
+ victim->frames_local = victim->frames_total = 0;
+ victim->parent->frames_local += count;
+ }
activity_deprepare (activity, victim);
@@ -216,9 +292,14 @@ activity_prepare (struct activity *principal, struct activity *activity)
void
activity_deprepare (struct activity *principal, struct activity *victim)
{
- /* If we have any in-memory children, then we can't be paged
- out. */
+ /* If we have any in-memory children or frames, then we can't be
+ paged out. */
assert (! victim->children);
+ assert (! activity_lru_list_count (&victim->active));
+ assert (! activity_lru_list_count (&victim->inactive_clean));
+ assert (! activity_lru_list_count (&victim->inactive_dirty));
+ assert (! eviction_list_count (&victim->eviction_clean));
+ assert (! eviction_list_count (&victim->eviction_dirty));
/* Unlink from parent's children list. */
assert (victim->parent);
@@ -242,9 +323,9 @@ do_activity_dump (struct activity *activity, int indent)
memset (indent_string, ' ', indent);
indent_string[indent] = 0;
- int active = object_activity_lru_list_count (&activity->active);
- int dirty = object_activity_lru_list_count (&activity->inactive_dirty);
- int clean = object_activity_lru_list_count (&activity->inactive_clean);
+ int active = activity_lru_list_count (&activity->active);
+ int dirty = activity_lru_list_count (&activity->inactive_dirty);
+ int clean = activity_lru_list_count (&activity->inactive_clean);
printf ("%s %llx: %d frames (active: %d, dirty: %d, clean: %d) "
"(total frames: %d)\n",
@@ -253,8 +334,6 @@ do_activity_dump (struct activity *activity, int indent)
activity->frames_local, active, dirty, clean,
activity->frames_total);
- assert (active + dirty + clean == activity->frames_local);
-
struct activity *child;
activity_for_each_child (activity, child,
({ do_activity_dump (child, indent + 1); }));
diff --git a/viengoos/activity.h b/viengoos/activity.h
index 3d0a2fb..e0fc53a 100644
--- a/viengoos/activity.h
+++ b/viengoos/activity.h
@@ -67,20 +67,33 @@ struct activity
struct activity *sibling_next;
struct activity *sibling_prev;
- /* Objects owned by this activity. */
- struct object_activity_lru_list active;
- struct object_activity_lru_list inactive_clean;
- struct object_activity_lru_list inactive_dirty;
-
- /* All objects owned by this activity whose priority is not
- OBJECT_PRIORITY_LRU, keyed by priority. */
+ /* Objects owned by this activity whose priority is
+ OBJECT_PRIORITY_LRU and for which DESC->EVICTION_CANDIDATE is
+ false. */
+ struct activity_lru_list active;
+ struct activity_lru_list inactive_clean;
+ struct activity_lru_list inactive_dirty;
+
+ /* Objects owned by this activity whose priority is not
+ OBJECT_PRIORITY_LRU and for which DESC->EVICTION_CANDIDATE is
+ false. Keyed by priority. */
hurd_btree_priorities_t priorities;
+ /* Objects that are owned by this activity and have been selected
+ for eviction (DESC->EVICTION_CANDIDATE is true). These objects
+ do not appear on the active or inactive lists and do not
+ contribute to frames_local or frames_total. */
+ struct eviction_list eviction_clean;
+ struct eviction_list eviction_dirty;
+
+ /* Number of frames allocated to this activity not counting
+ children. */
uint32_t frames_local;
/* Number of frames allocated to this activity (including children).
This is the sum of the number of objects on ACTIVE,
INACTIVE_CLEAN and INACTIVE_DIRTY plus the number of frames
- allocated to each child. */
+ allocated to each child. This does not include the number of
+ frames on the page out list. */
uint32_t frames_total;
int dying;
@@ -135,12 +148,18 @@ static inline void
activity_charge (struct activity *activity, int objects)
{
assert (activity);
+ if (objects < 0)
+ assertx (-objects <= activity->frames_local,
+ "%d <= %d", -objects, activity->frames_local);
+
activity->frames_local += objects;
activity_for_each_ancestor (activity,
({
- assert (activity->frames_total >= 0);
+ if (objects < 0)
+ assertx (-objects <= activity->frames_total,
+ "%d <= %d",
+ -objects, activity->frames_total);
activity->frames_total += objects;
- assert (activity->frames_total >= 0);
}));
}
diff --git a/viengoos/ager.c b/viengoos/ager.c
index 0740606..a101a45 100644
--- a/viengoos/ager.c
+++ b/viengoos/ager.c
@@ -1,5 +1,5 @@
/* ager.c - Ager loop implementation.
- Copyright (C) 2007 Free Software Foundation, Inc.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
This file is part of the GNU Hurd.
@@ -27,6 +27,7 @@
#include "ager.h"
#include "object.h"
#include "activity.h"
+#include "zalloc.h"
/* When frames are shared among multiple activities, the first
activity to access the frame is charged. This is unfair; the cost
@@ -54,9 +55,6 @@
#define UNMAP_INACTIVE 0
#define UNMAP_PERIODICALLY 1
-/* The amount to age a frame when it is found to be active. */
-#define AGE_DELTA (1 << 8)
-
void
ager_loop (l4_thread_id_t main_thread)
{
@@ -69,17 +67,11 @@ ager_loop (l4_thread_id_t main_thread)
int iterations = 0;
#endif
+ int frames = (last_frame - first_frame + PAGESIZE) / PAGESIZE;
+
for (;;)
{
- int active = 0;
- int inactive = 0;
- int retired = 0;
- int revived = 0;
-
- /* XXX: This lock could be held for a while. It would be much
- better to grab it and release it as required. This
- complicates the code a bit and requires some thought. */
- ss_mutex_lock (&lru_lock);
+ int frame = 0;
#define BATCH_SIZE (L4_NUM_MRS / 2)
struct object_desc *descs[BATCH_SIZE];
@@ -88,22 +80,43 @@ ager_loop (l4_thread_id_t main_thread)
/* We try to batch calls to l4_unmap, hence the acrobatics. */
- /* Grab a batch of records starting with *PTR. Changes *PTR to
- point to the next unprocessed record. */
- int grab (struct object_desc **ptr)
+ /* Grab a batch of live objects starting with object I. */
+ int grab (void)
{
- int count = 0;
- for (; *ptr && count < BATCH_SIZE;
- *ptr = object_global_lru_list_next (*ptr))
+ int count;
+ for (count = 0; frame < frames && count < BATCH_SIZE; frame ++)
{
- if (! ss_mutex_trylock (&(*ptr)->lock))
- /* We failed to get the lock. This means that someone is
- using this object and thus it makes no sense to
- age it; just continue. */
+ struct object_desc *desc = &object_descs[frame];
+
+ if (! desc->live)
+ /* The object is not live. */
+ continue;
+ if (desc->eviction_candidate)
+ /* Eviction candidates are unmapped. Don't waste our
+ time. */
continue;
- descs[count] = (*ptr);
- objects[count] = object_desc_to_object (descs[count]);
+ if (! ss_mutex_trylock (&desc->lock))
+ /* We failed to get the lock. This means that
+ someone is using this object and thus it makes no
+ sense to age it; just continue. */
+ continue;
+
+ if (! desc->live || desc->eviction_candidate)
+ /* State changed between check and lock acquisition,
+ unlock and continue. */
+ {
+ ss_mutex_unlock (&desc->lock);
+ continue;
+ }
+
+ assertx (desc->activity,
+ "OID: " OID_FMT " (%s), age: %d",
+ OID_PRINTF (desc->oid), cap_type_string (desc->type),
+ desc->age);
+
+ descs[count] = desc;
+ objects[count] = object_desc_to_object (desc);
fpages[count] = l4_fpage ((l4_word_t) objects[count], PAGESIZE);
count ++;
@@ -112,15 +125,15 @@ ager_loop (l4_thread_id_t main_thread)
return count;
}
- struct object_desc *ptr = object_global_lru_list_head (&global_active);
- for (;;)
+ int retired = 0;
+ int revived = 0;
+
+ while (frame < frames)
{
- int count = grab (&ptr);
+ int count = grab ();
if (count == 0)
break;
- active += count;
-
/* Get the status bits for the COUNT objects. (We flush
rather than unmap as we are also interested in whether we
have accessed the object--this occurs as we access some
@@ -128,6 +141,8 @@ ager_loop (l4_thread_id_t main_thread)
flushed a page of data to disk.) */
l4_flush_fpages (count, fpages);
+ ss_mutex_lock (&lru_lock);
+
int i;
for (i = 0; i < count; i ++)
{
@@ -138,144 +153,91 @@ ager_loop (l4_thread_id_t main_thread)
int dirty = (rights & L4_FPAGE_WRITABLE);
int referenced = (rights & L4_FPAGE_READABLE);
- desc->dirty = desc->dirty || dirty;
+ if (dirty)
+ /* Dirty implies referenced. */
+ assert (referenced);
- assert (desc->age);
-
- if (! referenced)
+ if (desc->age)
+ /* The object was active. */
{
-#if UNMAP_INACTIVE
- l4_fpage_t f;
- f = l4_fpage_add_rights (fpage, L4_FPAGE_FULLY_ACCESSIBLE);
- l4_unmap_fpage (f);
-#endif
+ assert (desc->activity);
- desc->age >>= 1;
+ desc->dirty |= dirty;
- if (desc->age == 0)
+ if (referenced)
+ object_age (desc);
+ else
{
- /* Move to the appropriate inactive lists. */
- object_global_lru_list_unlink (&global_active, desc);
- if (desc->dirty && ! desc->policy.discardable)
- object_global_lru_list_push (&global_inactive_dirty,
- desc);
- else
- object_global_lru_list_push (&global_inactive_clean,
- desc);
-
- /* If the object is owned, then move it to its
- activity's inactive list. Otherwise, the
- page is on the disowned list in which case we
- needn't do anything. */
- if (desc->activity)
+ desc->age >>= 1;
+
+ if (! desc->age
+ && desc->policy.priority == OBJECT_PRIORITY_LRU)
+ /* The object has become inactive. */
{
- object_activity_lru_list_unlink
- (desc->activity
- ? &desc->activity->active : &disowned,
- desc);
+ retired ++;
+
+ /* Detach from active list. */
+ activity_lru_list_unlink (&desc->activity->active,
+ desc);
+ /* Attach to appropriate inactive list. */
if (desc->dirty && ! desc->policy.discardable)
- object_activity_lru_list_push
+ activity_lru_list_push
(&desc->activity->inactive_dirty, desc);
else
- object_activity_lru_list_push
+ activity_lru_list_push
(&desc->activity->inactive_clean, desc);
+
+#if UNMAP_INACTIVE
+ l4_fpage_t f;
+ f = l4_fpage_add_rights (fpage,
+ L4_FPAGE_FULLY_ACCESSIBLE);
+ l4_unmap_fpage (f);
+#endif
}
}
-
- retired ++;
}
else
+ /* The object was inactive. */
{
- /* Be careful with wrap around. */
- if ((typeof (desc->age)) (desc->age + AGE_DELTA) > desc->age)
- desc->age += AGE_DELTA;
- }
-
- ss_mutex_unlock (&desc->lock);
- }
- }
-
-#if !UNMAP_INACTIVE
- struct object_global_lru_list *lists[] = { &global_inactive_clean,
- &global_inactive_dirty };
- int j;
- for (j = 0; j < 2; j ++)
- {
- ptr = object_global_lru_list_head (lists[j]);
-
- for (;;)
- {
- int count = grab (&ptr);
- if (count == 0)
- break;
-
- inactive += count;
-
- /* Get the status bits for the COUNT objects. (We flush
- rather than unmap as we are also interested in whether we
- have accessed the object--this occurs as we access some
- objects, e.g., cappages, on behalf activities or we have
- flushed a page of data to disk.) */
- l4_flush_fpages (count, fpages);
-
- int i;
- for (i = 0; i < count; i ++)
- {
- struct object_desc *desc = descs[i];
- l4_fpage_t fpage = fpages[i];
-
- l4_word_t rights = l4_rights (fpage);
-
- int dirty = (rights & L4_FPAGE_WRITABLE);
- int referenced = (rights & L4_FPAGE_READABLE);
- if (dirty)
- assert (referenced);
-
- desc->dirty = desc->dirty ? : dirty;
-
if (referenced)
+ /* The object has become active. */
{
revived ++;
- /* Move to the active list. */
- object_global_lru_list_unlink (lists[j], desc);
- object_global_lru_list_push (&global_active, desc);
-
desc->age = AGE_DELTA;
- /* If the object is not owned, then it is on the
- disowned list. It's unusual that we should
- get a reference on an unowned object, but,
- that can happen. */
- if (desc->activity)
+ if (desc->policy.priority == OBJECT_PRIORITY_LRU)
{
- object_activity_lru_list_unlink
- (desc->activity
- ? (j == 0
- ? &desc->activity->inactive_clean
- : &desc->activity->inactive_dirty)
- : &disowned,
- desc);
- object_activity_lru_list_push
- (&desc->activity->active, desc);
+ /* Detach from inactive list. */
+ if (desc->dirty && ! desc->policy.discardable)
+ activity_lru_list_unlink
+ (&desc->activity->inactive_dirty, desc);
+ else
+ activity_lru_list_unlink
+ (&desc->activity->inactive_clean, desc);
+
+ /* Attach to active list. */
+ activity_lru_list_push (&desc->activity->active,
+ desc);
}
- }
- ss_mutex_unlock (&desc->lock);
+ desc->dirty |= dirty;
+ }
}
+
+ ss_mutex_unlock (&desc->lock);
}
- }
-#endif
- ss_mutex_unlock (&lru_lock);
+ ss_mutex_unlock (&lru_lock);
+ }
#if UNMAP_PERIODICALLY
if (iterations == 8 * 5)
{
- debug (1, "Unmapping all. %d active, %d inactive, last interation "
- "retired: %d, revived: %d",
- active, inactive, retired, revived);
+ debug (1, "Unmapping all (%d of %d). "
+ "last iteration retired: %d, revived: %d",
+ zalloc_memory, memory_total, retired, revived);
l4_unmap_fpage (l4_fpage_add_rights (L4_COMPLETE_ADDRESS_SPACE,
L4_FPAGE_FULLY_ACCESSIBLE));
diff --git a/viengoos/memory.c b/viengoos/memory.c
index 2b90017..fafe44a 100644
--- a/viengoos/memory.c
+++ b/viengoos/memory.c
@@ -18,8 +18,10 @@
along with this program. If not, see
<http://www.gnu.org/licenses/>. */
-#include "zalloc.h"
#include "memory.h"
+#include "pager.h"
+#include "activity.h"
+#include "zalloc.h"
#include <string.h>
@@ -333,13 +335,22 @@ memory_grab (void)
#else
l4_word_t s;
l4_fpage_t fpage;
+
+#if 0
+ /* We need the dirty bits at a page granularity due to our own
+ references. This unfortunately means no large pages. */
+
/* Try with the largest fpage possible. */
for (s = L4_WORDSIZE - 1; s >= l4_min_page_size_log2 (); s --)
- /* Keep getting pages of size 2^S. */
- while (! l4_is_nil_fpage (fpage = sigma0_get_any (s)))
- /* FPAGE is an fpage of size 2^S. Add each non-reserved base
- frame to the free list. */
- add (l4_address (fpage), l4_size (fpage));
+ ...;
+#endif
+
+ s = l4_min_page_size_log2 ();
+ /* Keep getting pages of size 2^S. */
+ while (! l4_is_nil_fpage (fpage = sigma0_get_any (s)))
+ /* FPAGE is an fpage of size 2^S. Add each non-reserved base
+ frame to the free list. */
+ add (l4_address (fpage), l4_size (fpage));
#endif
#ifndef NDEBUG
@@ -348,10 +359,57 @@ memory_grab (void)
#endif
}
-l4_word_t
-memory_frame_allocate (void)
+uintptr_t
+memory_frame_allocate (struct activity *activity)
{
- l4_word_t f = zalloc (PAGESIZE);
+ uintptr_t f = zalloc (PAGESIZE);
+ if (! f)
+ {
+ bool collected = false;
+
+ for (;;)
+ {
+ /* Check if there are any pages on the available list. */
+ ss_mutex_lock (&lru_lock);
+
+ /* XXX: We avoid objects that require special treatment.
+ Realize this special treatment. */
+ struct object_desc *desc = available_list_tail (&available);
+ while (desc)
+ {
+ if (desc->type != cap_activity_control
+ && desc->type != cap_thread)
+ break;
+
+ desc = available_list_prev (&available);
+ }
+
+ if (desc)
+ {
+ assert (desc->live);
+ assert (desc->eviction_candidate);
+ assert (desc->activity);
+ assert (object_type ((struct object *) desc->activity)
+ == cap_activity_control);
+ assert (! desc->dirty || desc->policy.discardable);
+
+ struct object *object = object_desc_to_object (desc);
+ memory_object_destroy (activity, object);
+
+ f = (uintptr_t) object;
+ }
+
+ ss_mutex_unlock (&lru_lock);
+
+ if (f || collected)
+ break;
+
+ /* Try collecting. */
+ pager_collect ();
+ collected = true;
+ }
+ }
+
if (! f)
panic ("Out of memory");
diff --git a/viengoos/memory.h b/viengoos/memory.h
index 96f6baf..3e60bf4 100644
--- a/viengoos/memory.h
+++ b/viengoos/memory.h
@@ -1,5 +1,5 @@
/* memory.h - Basic memory management interface.
- Copyright (C) 2007 Free Software Foundation, Inc.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
This file is part of the GNU Hurd.
@@ -24,6 +24,9 @@
#include <stdint.h>
#include <l4.h>
+/* Forward. */
+struct activity;
+
enum memory_reservation
{
/* Our binary, never freed. */
@@ -70,9 +73,9 @@ extern void memory_reservation_clear (enum memory_reservation reservation);
/* Grab all the memory in the system. */
extern void memory_grab (void);
-/* Allocate a page of memory. Returns NULL if there is no memory
- available. */
-extern l4_word_t memory_frame_allocate (void);
+/* Allocate a page of memory on behalf of activity ACTIVITY. Returns
+ NULL if there is no memory available. */
+extern uintptr_t memory_frame_allocate (struct activity *activity);
/* Return the frame starting at address ADDR to the free pool. */
extern void memory_frame_free (l4_word_t addr);
diff --git a/viengoos/object.c b/viengoos/object.c
index 3976bfe..2b9a872 100644
--- a/viengoos/object.c
+++ b/viengoos/object.c
@@ -33,10 +33,8 @@ struct object_desc *object_descs;
ss_mutex_t lru_lock;
-struct object_global_lru_list global_active;
-struct object_global_lru_list global_inactive_dirty;
-struct object_global_lru_list global_inactive_clean;
-struct object_activity_lru_list disowned;
+struct laundry_list laundry;
+struct available_list available;
/* XXX: The number of in memory folios. (Recall: one folio => 512kb
storage.) */
@@ -88,10 +86,11 @@ memory_object_alloc (struct activity *activity,
{
debug (5, "Allocating %llx(%d), %s", oid, version, cap_type_string (type));
+ assert (activity || ! root_activity);
assert (type != cap_void);
assert ((type == cap_folio) == ((oid % (FOLIO_OBJECTS + 1)) == 0));
- struct object *object = (struct object *) memory_frame_allocate ();
+ struct object *object = (struct object *) memory_frame_allocate (activity);
if (! object)
{
/* XXX: Do some garbage collection. */
@@ -102,15 +101,13 @@ memory_object_alloc (struct activity *activity,
/* Fill in the object descriptor. */
struct object_desc *odesc = object_to_object_desc (object);
+ assert (! odesc->live);
+ memset (odesc, 0, sizeof (*odesc));
+
odesc->type = type;
odesc->version = version;
odesc->oid = oid;
- odesc->dirty = 0;
- odesc->lock = l4_nilthread;
-
- assert (! odesc->activity);
-
/* Add to OBJECTS. */
bool had_value;
hurd_ihash_value_t old_value;
@@ -119,71 +116,52 @@ memory_object_alloc (struct activity *activity,
assert (err == 0);
assert (! had_value);
- /* Connect to object lists. */
-
- /* Give it a nominal age so that it is not immediately paged out.
- Normally, the page will be immediately referenced. */
- odesc->age = 1;
-
- ss_mutex_lock (&lru_lock);
-
- odesc->global_lru.next = odesc->global_lru.prev = NULL;
- odesc->activity_lru.next = odesc->activity_lru.prev = NULL;
-
- object_global_lru_list_push (&global_active, odesc);
- /* object_desc_claim wants to first unlink the descriptor. To make
- it happy, we initially connect the descriptor to the disowned
- list. */
- object_activity_lru_list_push (&disowned, odesc);
+ /* Mark the object as live. */
+ odesc->live = 1;
if (! activity)
/* This may only happen if we are initializing. */
assert (! root_activity);
else
- /* Account the memory to the activity ACTIVITY. */
- object_desc_claim (activity, odesc, policy);
+ {
+ ss_mutex_lock (&lru_lock);
- ss_mutex_unlock (&lru_lock);
+ /* Account the memory to the activity ACTIVITY. */
+ object_desc_claim (activity, odesc, policy, true);
+
+ ss_mutex_unlock (&lru_lock);
+ }
return object;
}
-/* Release the object. */
-static void
+void
memory_object_destroy (struct activity *activity, struct object *object)
{
assert (activity);
+ assert (! ss_mutex_trylock (&lru_lock));
struct object_desc *desc = object_to_object_desc (object);
+ assert (desc->live);
+
debug (5, "Destroy %s at 0x%llx (object %d)",
cap_type_string (desc->type), desc->oid,
((uintptr_t) desc - (uintptr_t) object_descs)
/ sizeof (*desc));
+ if (desc->dirty && desc->policy.discardable)
+ /* Note that the page was discarded. */
+ /* XXX: This doesn't really belong here. */
+ {
+ struct folio *folio = objects_folio (activity, object);
+ folio->objects[objects_folio_offset (object)].content = 0;
+ }
+
struct cap cap = object_desc_to_cap (desc);
cap_shootdown (activity, &cap);
- ss_mutex_lock (&lru_lock);
- object_desc_disown (desc);
-
- struct object_global_lru_list *global;
- if (desc->age)
- /* DESC is active. */
- global = &global_active;
- else
- /* DESC is inactive. */
- if (desc->dirty && ! desc->policy.discardable)
- /* And dirty. */
- global = &global_inactive_dirty;
- else
- /* And clean. */
- global = &global_inactive_clean;
-
- object_activity_lru_list_unlink (&disowned, desc);
- object_global_lru_list_unlink (global, desc);
-
- ss_mutex_unlock (&lru_lock);
+ object_desc_claim (NULL, desc, desc->policy, true);
if (desc->type == cap_activity_control)
{
@@ -199,8 +177,7 @@ memory_object_destroy (struct activity *activity, struct object *object)
memset (desc, 0xde, sizeof (struct object_desc));
#endif
- /* Return the frame to the free pool. */
- memory_frame_free ((l4_word_t) object);
+ desc->live = 0;
}
struct object *
@@ -225,7 +202,7 @@ object_find_soft (struct activity *activity, oid_t oid,
ownership. */
{
ss_mutex_lock (&lru_lock);
- object_desc_claim (activity, odesc, policy);
+ object_desc_claim (activity, odesc, policy, true);
ss_mutex_unlock (&lru_lock);
}
@@ -460,13 +437,6 @@ folio_free (struct activity *activity, struct folio *folio)
folio->next.type = cap_void;
folio->prev.type = cap_void;
- /* Disown the frame. */
- owner = fdesc->activity;
-
- ss_mutex_lock (&lru_lock);
- object_disown ((struct object *) folio);
- ss_mutex_unlock (&lru_lock);
-
/* And free the folio. */
folio->folio_version = fdesc->version ++;
bit_dealloc (folios, fdesc->oid / (FOLIO_OBJECTS + 1));
@@ -531,7 +501,13 @@ folio_object_alloc (struct activity *activity,
if (type == cap_void)
/* We are deallocating the object: free associated memory. */
{
+ ss_mutex_lock (&lru_lock);
memory_object_destroy (activity, object);
+ ss_mutex_unlock (&lru_lock);
+
+ /* Return the frame to the free pool. */
+ memory_frame_free ((l4_word_t) object);
+
object = NULL;
}
else
@@ -541,7 +517,7 @@ folio_object_alloc (struct activity *activity,
cap_shootdown (activity, &cap);
ss_mutex_lock (&lru_lock);
- object_desc_claim (activity, odesc, policy);
+ object_desc_claim (activity, odesc, policy, true);
ss_mutex_unlock (&lru_lock);
}
@@ -624,82 +600,96 @@ folio_policy (struct activity *activity,
}
void
-object_desc_disown_simple (struct object_desc *desc)
-{
- assert (! ss_mutex_trylock (&lru_lock));
- assert (desc->activity);
-
- struct object_activity_lru_list *list;
- if (desc->age)
- /* DESC is active. */
- list = &desc->activity->active;
- else
- /* DESC is inactive. */
- if (desc->dirty && ! desc->policy.discardable)
- /* And dirty. */
- list = &desc->activity->inactive_dirty;
- else
- /* And clean. */
- list = &desc->activity->inactive_clean;
-
- object_activity_lru_list_unlink (list, desc);
- object_activity_lru_list_push (&disowned, desc);
-
- if (desc->policy.priority != OBJECT_PRIORITY_LRU)
- hurd_btree_priorities_detach (&desc->activity->priorities, desc);
-
- desc->activity = NULL;
-}
-
-void
-object_desc_disown_ (struct object_desc *desc)
+object_desc_claim (struct activity *activity, struct object_desc *desc,
+ struct object_policy policy, bool update_accounting)
{
- activity_charge (desc->activity, -1);
- object_desc_disown_simple (desc);
-}
-
-void
-object_desc_claim_ (struct activity *activity, struct object_desc *desc,
- struct object_policy policy)
-{
- assert (activity);
-
- if (desc->activity == activity)
- /* Same owner: update the policy. */
+ assert (desc->activity || activity);
+
+ if (desc->activity == activity
+ && ! desc->eviction_candidate
+ && desc->policy.priority == policy.priority)
+ /* The owner remains the same, the object is not an eviction
+ candidate and the priority didn't change; don't do any
+ unnecessary work. */
{
desc->policy.discardable = policy.discardable;
-
- if (desc->policy.priority == policy.priority)
- /* The priority didn't change; don't do any unnecessary work. */
- return;
-
- if (desc->policy.priority != OBJECT_PRIORITY_LRU)
- hurd_btree_priorities_detach (&desc->activity->priorities, desc);
-
- desc->policy.priority = policy.priority;
-
- if (desc->policy.priority != OBJECT_PRIORITY_LRU)
- hurd_btree_priorities_insert (&desc->activity->priorities, desc);
-
return;
}
+
+ /* We need to disconnect DESC from its old activity. If DESC does
+ not have an activity, it is being initialized. */
if (desc->activity)
- /* Already claimed by another activity; first disown it. */
- object_desc_disown (desc);
+ {
+ assert (object_type ((struct object *) desc->activity)
+ == cap_activity_control);
- /* DESC->ACTIVITY is NULL so DESC must be on DISOWNED. */
- object_activity_lru_list_unlink (&disowned, desc);
- object_activity_lru_list_push (&activity->active, desc);
- desc->activity = activity;
- activity_charge (activity, 1);
+ if (desc->eviction_candidate)
+ /* DESC is an eviction candidate. The act of claiming saves
+ it. */
+ {
+ if (desc->dirty && ! desc->policy.discardable)
+ {
+ laundry_list_unlink (&laundry, desc);
+ eviction_list_unlink (&desc->activity->eviction_dirty, desc);
+ }
+ else
+ {
+ available_list_unlink (&available, desc);
+ eviction_list_unlink (&desc->activity->eviction_clean, desc);
+ }
+ }
+ else
+ {
+ if (desc->policy.priority != OBJECT_PRIORITY_LRU)
+ hurd_btree_priorities_detach (&desc->activity->priorities, desc);
+ else
+ {
+ struct activity_lru_list *list;
+ if (desc->age)
+ /* DESC is active. */
+ list = &desc->activity->active;
+ else
+ /* DESC is inactive. */
+ if (desc->dirty && ! desc->policy.discardable)
+ /* And dirty. */
+ list = &desc->activity->inactive_dirty;
+ else
+ /* And clean. */
+ list = &desc->activity->inactive_clean;
+
+ activity_lru_list_unlink (list, desc);
+ }
+
+ if (activity != desc->activity && update_accounting)
+ activity_charge (desc->activity, -1);
+ }
+ }
+
+ if (! activity)
+ return;
- desc->policy.discardable = policy.discardable;
desc->policy.priority = policy.priority;
- if (policy.priority != OBJECT_PRIORITY_LRU)
- /* Add to ACTIVITY's priority queue. */
+
+ /* Assign to ACTIVITY. */
+
+ /* We make the object active. The invariants require that DESC->AGE
+ be non-zero. */
+ object_age (desc);
+ if (desc->policy.priority != OBJECT_PRIORITY_LRU)
{
- void *ret = hurd_btree_priorities_insert (&activity->priorities, desc);
+ void *ret = hurd_btree_priorities_insert (&activity->priorities,
+ desc);
assert (! ret);
}
+ else
+ activity_lru_list_push (&activity->active, desc);
+
+ if ((desc->eviction_candidate || activity != desc->activity)
+ && update_accounting)
+ activity_charge (activity, 1);
+
+ desc->eviction_candidate = false;
+ desc->activity = activity;
+ desc->policy.discardable = policy.discardable;
}
diff --git a/viengoos/object.h b/viengoos/object.h
index 4d4a903..c7e2390 100644
--- a/viengoos/object.h
+++ b/viengoos/object.h
@@ -117,41 +117,74 @@ struct activity;
/* An object descriptor. There is one for each in-memory object. */
struct object_desc
{
- /* Every in-memory object lives in a hash hashed on its OID. */
- void *locp;
-
+ /* Protects the following members and the object's data. */
ss_mutex_t lock;
/* The version and OID of the object. */
oid_t oid;
-
l4_word_t version : CAP_VERSION_BITS;
l4_word_t type : CAP_TYPE_BITS;
+ /* Whether the page is dirty. */
l4_word_t dirty: 1;
+ /* Whether the object has been selected for eviction. */
+ l4_word_t eviction_candidate : 1;
+
+ /* Whether the object is live. */
+ l4_word_t live: 1;
+
+ /* The object's policy. Set when the object is claimed using the
+ value in the capability referencing the object. */
struct object_policy policy;
/* The object's age. */
uint16_t age;
- /* Ordered list of all owned objects whose priority is other than
- OBJECT_PRIORITY_LRU. */
- hurd_btree_node_t priority_node;
-
/* The activity to which the *memory* (not the storage) is
attributed. If none, then NULL. (The owner of the storage can
be found by looking at the folio.) */
struct activity *activity;
- /* Each allocated object is attached to the activity that pays for
- it. If ACTIVITY is NULL, then attached to DISOWNED. Protected
- by LRU_LOCK. */
- struct list_node activity_lru;
+ /* The following members are protected by LRU_LOCK. */
+
+ /* Every in-memory object lives in a hash hashed on its OID. */
+ void *locp;
+
+ union
+ {
+ /* ACTIVITY is valid, EVICTION_CANDIDATE is false, POLICY.PRIORITY
+ != OBJECT_PRIORITY_LRU.
+
+ => attached to ACTIVITY->PRIORITIES. */
+ hurd_btree_node_t priority_node;
+
+ /* ACTIVITY is valid, EVICTION_CANDIDATE is false, POLICY.PRIORITY
+ == OBJECT_PRIORITY_LRU,
+
+ => attached to one of ACTIVITY's LRU lists.
- /* Each allocated object is attached to either the global active or
- the global inactive list. */
- struct list_node global_lru;
+ EVICTION_CANDIDATE is true
+
+ => attached to either ACTIVITY->EVICTION_CLEAN or
+ ACTIVITY->EVICTION_DIRTY. */
+ struct list_node activity_node;
+ };
+
+ union
+ {
+ /* If EVICTION_CANDIDATE is true and DIRTY and
+ !POLICY.DISCARDABLE, then attached to LAUNDRY.
+
+ If EVICTION_CANDIDATE is false, MAY be attached to LAUNDRY if
+ DIRTY and !POLICY.DISCARDABLE. (In this case, it is an
+ optimistic sync.) */
+ struct list_node laundry_node;
+
+ /* If EVICTION_CANDIDATE is true and either !DIRTY or
+ POLICY.DISCARDABLE, then attached to AVAILABLE. */
+ struct list_node available_node;
+ };
};
/* We keep an array of object descriptors. There is a linear mapping
@@ -160,28 +193,29 @@ struct object_desc
memory map. */
extern struct object_desc *object_descs;
-LIST_CLASS(object_activity_lru, struct object_desc, activity_lru)
-LIST_CLASS(object_global_lru, struct object_desc, global_lru)
+LIST_CLASS(activity_lru, struct object_desc, activity_node)
+LIST_CLASS(eviction, struct object_desc, activity_node)
+
+LIST_CLASS(available, struct object_desc, available_node)
+LIST_CLASS(laundry, struct object_desc, laundry_node)
-/* Lock protecting the following lists and each object descriptor's
- activity_lru, global_lru and priority_node fields. */
+/* Lock protecting the following lists as well as an activity's
+ lists. */
extern ss_mutex_t lru_lock;
-/* The global LRU lists. Every allocated frame is on one of these
- two. */
-extern struct object_global_lru_list global_active;
-extern struct object_global_lru_list global_inactive_dirty;
-extern struct object_global_lru_list global_inactive_clean;
-/* Objects that are not accounted to any activity (e.g., if the
- activity to which an object is attached is destroyed, it is
- attached to this list). */
-extern struct object_activity_lru_list disowned;
+/* List of objects that need syncing to backing store. */
+extern struct laundry_list laundry;
+/* List of clean objects available for reallocation. */
+extern struct available_list available;
/* Sort lower priority objects towards the start. */
static int
priority_compare (const struct object_policy *a,
const struct object_policy *b)
{
+ /* XXX: We should actually compare on priority and then on age. To
+ allow finding an object with a particular priority but any
+ age, we need to have a special key. */
return (int) a->priority - (int) b->priority;
}
@@ -204,33 +238,40 @@ extern struct object *object_find_soft (struct activity *activity,
oid_t oid,
struct object_policy policy);
+/* Destroy the object OBJECT. Any changes must have already been
+ flushed to disk. LRU_LOCK must be held. Does NOT release the
+ memory. It is the caller's responsibility to ensure that
+ memory_frame_free is eventually called. */
+extern void memory_object_destroy (struct activity *activity,
+ struct object *object);
+
/* Return the object corresponding to the object descriptor DESC. */
-#define object_desc_to_object(desc_) \
- ({ \
- struct object_desc *desc__ = (desc_); \
- /* There is only one legal area for descriptors. */ \
- assert ((uintptr_t) object_descs <= (uintptr_t) (desc__)); \
- assert ((uintptr_t) (desc__) \
- <= (uintptr_t) &object_descs[(last_frame - first_frame) \
- / PAGESIZE]); \
- \
- (struct object *) (first_frame \
+#define object_desc_to_object(desc_) \
+ ({ \
+ struct object_desc *desc__ = (desc_); \
+ /* There is only one legal area for descriptors. */ \
+ assert ((uintptr_t) object_descs <= (uintptr_t) (desc__)); \
+ assert ((uintptr_t) (desc__) \
+ <= (uintptr_t) &object_descs[(last_frame - first_frame) \
+ / PAGESIZE]); \
+ \
+ (struct object *) (first_frame \
+ (((uintptr_t) (desc__) - (uintptr_t) object_descs) \
- / sizeof (struct object_desc)) * PAGESIZE); \
+ / sizeof (struct object_desc)) * PAGESIZE); \
})
/* Return the object descriptor corresponding to the object
OBJECT. */
-#define object_to_object_desc(object_) \
- ({ \
- struct object *object__ = (object_); \
- /* Objects better be on a page boundary. */ \
- assert (((uintptr_t) (object__) & (PAGESIZE - 1)) == 0); \
- /* And they better be in memory. */ \
- assert (first_frame <= (uintptr_t) (object__)); \
- assert ((uintptr_t) (object__) <= last_frame); \
- \
- &object_descs[((uintptr_t) (object__) - first_frame) / PAGESIZE]; \
+#define object_to_object_desc(object_) \
+ ({ \
+ struct object *object__ = (object_); \
+ /* Objects better be on a page boundary. */ \
+ assert (((uintptr_t) (object__) & (PAGESIZE - 1)) == 0); \
+ /* And they better be in memory. */ \
+ assert (first_frame <= (uintptr_t) (object__)); \
+ assert ((uintptr_t) (object__) <= last_frame); \
+ \
+ &object_descs[((uintptr_t) (object__) - first_frame) / PAGESIZE]; \
})
/* Return a cap referencing the object designated by OBJECT_DESC. */
@@ -261,64 +302,57 @@ object_type (struct object *object)
{
return object_to_object_desc (object)->type;
}
-
-/* Like object_disown but does not adjust DESC->ACTIVITY->FRAMES.
- (This is useful when removing multiple frames at once.) */
-extern void object_desc_disown_simple (struct object_desc *desc);
-static inline void
-object_disown_simple (struct object *object)
+/* Unmaps the object corresponding to DESC from all clients. Returns
+ whether it was dirty. */
+static inline bool
+object_desc_unmap (struct object_desc *desc)
{
- object_desc_disown_simple (object_to_object_desc (object));
-}
+ assert (desc->live);
+
+ struct object *object = object_desc_to_object (desc);
+
+ l4_fpage_t flush = l4_fpage ((l4_word_t) object, PAGESIZE);
+ l4_fpage_t unmap = l4_fpage_add_rights (flush, L4_FPAGE_FULLY_ACCESSIBLE);
-extern inline void object_desc_disown_ (struct object_desc *desc);
-#define object_desc_disown(d) \
- ({ debug (5, "object_desc_disown: %p (%d)", \
- d->activity, d->activity->frames_total); \
- assert (! ss_mutex_trylock (&lru_lock)); \
- object_desc_disown_ (d); })
+ desc->dirty |= l4_was_written (l4_unmap_fpage (unmap));
-/* The activity to which OBJECT is accounted should no longer be
- accounted OJBECT. Attaches object to the DISOWNED list. */
+ if (! desc->dirty)
+ /* We may have dirtied it. */
+ desc->dirty |= l4_was_written (l4_flush (flush));
+
+ return desc->dirty;
+}
+
+/* Transfer ownership of DESC to the activity ACTIVITY. If ACTIVITY
+ is NULL, detaches DESC from lists (this functionality should only
+ be used by memory_object_destroy). If UPDATE_ACCOUNTING is not
+ true, it is the caller's responsibility to update the accounting
+ information for the old owner and the new owner. */
+extern void object_desc_claim (struct activity *activity,
+ struct object_desc *desc,
+ struct object_policy policy,
+ bool update_accounting);
+
+/* See object_desc_claim. */
static inline void
-object_disown_ (struct object *object)
+object_claim (struct activity *activity, struct object *object,
+ struct object_policy policy, bool update_accounting)
{
- object_desc_disown_ (object_to_object_desc (object));
+ object_desc_claim (activity, object_to_object_desc (object), policy,
+ update_accounting);
}
-#define object_disown(o) \
- ({ \
- debug (5, "object_disown: "); \
- assert (! ss_mutex_trylock (&lru_lock)); \
- object_disown_ (o); \
- })
-
-/* Transfer ownership of DESC to the activity ACTIVITY. */
-extern void object_desc_claim_ (struct activity *activity,
- struct object_desc *desc,
- struct object_policy policy);
-#define object_desc_claim(__odc_a, __odc_o, __odc_p) \
- ({ \
- debug (5, "object_desc_claim: %p (%d)", \
- (__odc_a), (__odc_a)->frames_total); \
- assert (! ss_mutex_trylock (&lru_lock)); \
- object_desc_claim_ ((__odc_a), (__odc_o), (__odc_p)); \
- })
-
-/* Transfer ownership of OBJECT to the activity ACTIVITY. */
+
static inline void
-object_claim_ (struct activity *activity, struct object *object,
- struct object_policy policy)
+object_age (struct object_desc *desc)
{
- object_desc_claim_ (activity, object_to_object_desc (object), policy);
+ /* The amount to age a frame when it is found to be active. */
+#define AGE_DELTA (1 << 8)
+
+ /* Be careful with wrap around. */
+ if ((typeof (desc->age)) (desc->age + AGE_DELTA) > desc->age)
+ desc->age += AGE_DELTA;
}
-#define object_claim(__oc_a, __oc_o, __oc_p) \
- ({ \
- debug (5, "object_claim: %p (%d)", \
- (__oc_a), (__oc_a)->frames_total); \
- assert (! ss_mutex_trylock (&lru_lock)); \
- object_claim_ ((__oc_a), (__oc_o)); \
- })
/* Allocate a folio to activity ACTIVITY. POLICY is the new folio's
initial storage policy. Returns NULL if not possible. Otherwise a
@@ -352,20 +386,37 @@ folio_object_free (struct activity *activity,
OBJECT_POLICY_VOID, NULL);
}
-/* Deallocate the object OBJECT. */
-static inline void
-object_free (struct activity *activity, struct object *object)
+/* Return an object's position within its folio. */
+static inline int
+objects_folio_offset (struct object *object)
+{
+ struct object_desc *desc = object_to_object_desc (object);
+
+ return (desc->oid % (1 + FOLIO_OBJECTS)) - 1;
+}
+
+/* Return the folio corresponding to the object OBJECT. */
+static inline struct folio *
+objects_folio (struct activity *activity, struct object *object)
{
struct object_desc *odesc = object_to_object_desc (object);
- int page = (odesc->oid % (1 + FOLIO_OBJECTS)) - 1;
+ int page = objects_folio_offset (object);
oid_t foid = odesc->oid - page - 1;
struct folio *folio = (struct folio *) object_find (activity, foid,
OBJECT_POLICY_VOID);
assert (folio);
- folio_object_free (activity, folio, page);
+ return folio;
+}
+
+/* Deallocate the object OBJECT. */
+static inline void
+object_free (struct activity *activity, struct object *object)
+{
+ folio_object_free (activity, objects_folio (activity, object),
+ objects_folio_offset (object));
}
/* Get and set folio FOLIO's storage policy according to flags FLAGS,
diff --git a/viengoos/pager.c b/viengoos/pager.c
new file mode 100644
index 0000000..730d49a
--- /dev/null
+++ b/viengoos/pager.c
@@ -0,0 +1,373 @@
+/* pager.c - Pager implementation.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "memory.h"
+#include "zalloc.h"
+#include "activity.h"
+#include "object.h"
+
+/* We try to keep at least 1/8 (12.5%) of memory available for
+ immediate allocation. */
+#define LOW_WATER_MARK (memory_total / 8)
+/* When we start freeing, we try to get at least 3/16 (~19%) of memory
+ available for immediate allocation. */
+#define HIGH_WATER_MARK (LOW_WATER_MARK + LOW_WATER_MARK / 2)
+
+/* Free memory from the activity or activities with the largest
+ disproportionate share of memory such that the number of clean
+ pages plus the number of pages in the laundry exceed the low-memory
+ threshold. */
+void
+pager_collect (void)
+{
+ int available_pages = zalloc_memory
+ + available_list_count (&available)
+ /* We only count the pages on the laundry half as they won't be
+ available immediately. */
+ + laundry_list_count (&laundry) / 2;
+
+ if (available_pages > LOW_WATER_MARK)
+ return;
+
+ /* We've dipped below the low water mark. Free enough memory such
+ that we are above the high water mark. */
+
+ int goal = HIGH_WATER_MARK - available_pages;
+
+ debug (0, "Frames: %d, available: %d (%d%%) (dirty: %d), "
+ "low water: %d, goal: %d",
+ memory_total,
+ available_pages, (available_pages * 100) / memory_total,
+ laundry_list_count (&laundry),
+ LOW_WATER_MARK, goal);
+
+ /* Find a victim. */
+ struct activity *victim = root_activity;
+ struct activity *parent;
+
+ struct activity_memory_policy victim_policy;
+ int victim_frames;
+
+ do
+ {
+ /* The total weight and the total number of frames of the lowest
+ priority group. */
+ int weight;
+ int frames;
+
+ parent = victim;
+ victim_frames = victim->frames_local;
+ victim_policy = victim->policy.child_rel;
+ weight = victim_policy.weight;
+ frames = victim_frames;
+
+ struct activity *child;
+ activity_for_each_inmemory_child
+ (parent, child,
+ ({
+ if (child->policy.sibling_rel.priority < victim_policy.priority)
+ /* CHILD has a lower absolute priority. */
+ {
+ victim = child;
+ victim_frames = victim->frames_total;
+ victim_policy = victim->policy.sibling_rel;
+
+ /* Reset the weight. */
+ weight = victim_policy.weight;
+ frames = victim_frames;
+ }
+ else if (child->policy.sibling_rel.priority
+ == victim_policy.priority)
+ /* CHILD and VICTIM have equal priority. Steal from the one
+ which has the most pages, taking into account their respective
+ weights. */
+ {
+ weight += child->policy.sibling_rel.weight;
+ frames += child->frames_total;
+
+ int f = child->frames_total + victim_frames;
+ int w = child->policy.sibling_rel.weight + victim_policy.weight;
+
+ int child_excess = child->frames_total
+ - (child->policy.sibling_rel.weight * f) / w;
+ int victim_excess = victim_frames
+ - (victim_policy.weight * f) / w;
+
+ if (child_excess > victim_excess)
+ /* CHILD has more excess frames than VICTIM. */
+ {
+ victim = child;
+ victim_frames = victim->frames_total;
+ victim_policy = victim->policy.sibling_rel;
+ }
+ }
+ }));
+
+ if (frames >= goal)
+ /* The number of frames at this priority level exceed the
+ goal. Page all activity's at this priority level
+ out. */
+ {
+ /* XXX: Do it. */
+ }
+
+ /* VICTIM's share of the frames allocated to all the activity's
+ at this priority level. */
+ int share;
+ if (weight == 0)
+ share = 0;
+ else
+ share = (frames * victim_policy.weight) / weight;
+
+ /* VICTIM's share must be less than or equal to the frames
+ allocated to this priority as we know that this activity has
+ an excess and VICTIM is the most excessive. */
+ assertx (share < victim->frames_total,
+ "%d <= %d", share, victim->frames_total);
+
+ if (victim->frames_total - share < goal)
+ /* VICTIM's excess is less than the amount we want to reclaim.
+ Reclaim from the second worst offender after we finish
+ here. */
+ {
+ /* XXX: Do it. */
+ }
+
+ if (victim->frames_total - share < goal)
+ goal = victim->frames_total - share;
+
+ debug (0, "Choosing activity " OID_FMT "%s, %d/%d frames, "
+ "share: %d, goal: %d",
+ OID_PRINTF (object_to_object_desc ((struct object *) victim)->oid),
+ victim->parent ? "" : " (root activity)",
+ victim->frames_total, victim->frames_local, share, goal);
+ }
+ while (victim != parent);
+ assert (victim);
+
+ /* We want to steal GOAL pages from VICTIM. */
+
+ int count = 0;
+ int laundry_count = 0;
+
+ /* XXX: Implement group dealloc. */
+
+ ss_mutex_lock (&lru_lock);
+
+ /* First try objects with a priority lower than LRU. */
+
+ struct object_desc *desc;
+ struct object_desc *next = hurd_btree_priorities_first (&victim->priorities);
+ while (((desc = next)) && count < goal)
+ {
+ assert (! desc->eviction_candidate);
+
+ assert (desc->policy.priority != OBJECT_PRIORITY_LRU);
+ if (desc->policy.priority > OBJECT_PRIORITY_LRU)
+ /* DESC's priority is higher than OBJECT_PRIORITY_LRU, prefer
+ LRU ordered pages. */
+ break;
+
+ next = hurd_btree_priorities_next (desc);
+
+ object_desc_unmap (desc);
+
+ hurd_btree_priorities_detach (&victim->priorities, desc);
+
+ desc->eviction_candidate = true;
+
+ if (desc->dirty && ! desc->policy.discardable)
+ {
+ if (! list_node_attached (&desc->laundry_node))
+ laundry_list_queue (&laundry, desc);
+
+ eviction_list_queue (&victim->eviction_dirty, desc);
+ laundry_count ++;
+ }
+ else
+ {
+ assert (! list_node_attached (&desc->available_node));
+
+ available_list_queue (&available, desc);
+ eviction_list_queue (&victim->eviction_clean, desc);
+ }
+
+ count ++;
+ }
+
+ if (count < goal)
+ /* VICTIM still has to yield pages. Start stealing from the
+ inactive LRU lists. */
+ {
+ struct object_desc *clean, *dirty;
+
+ /* For every clean page we steal, we queue a dirty page for
+ writeout. */
+
+ clean = activity_lru_list_head (&victim->inactive_clean);
+ dirty = activity_lru_list_head (&victim->inactive_dirty);
+
+ struct object_desc *next;
+
+ while ((clean || dirty) && count < goal)
+ {
+ if (clean)
+ {
+ assert (! clean->eviction_candidate);
+ assert (! clean->dirty || clean->policy.discardable);
+ assert (! list_node_attached (&clean->available_node));
+
+ next = activity_lru_list_next (clean);
+
+ activity_lru_list_unlink (&victim->inactive_clean, clean);
+
+ if (object_desc_unmap (clean))
+ /* It is possible that the page was dirtied between
+ the last check and now. */
+ {
+ eviction_list_push (&victim->eviction_dirty, clean);
+
+ laundry_list_queue (&laundry, clean);
+ }
+ else
+ {
+ eviction_list_push (&victim->eviction_clean, clean);
+
+ available_list_queue (&available, clean);
+ }
+
+ clean->eviction_candidate = true;
+
+ count ++;
+
+ clean = next;
+ }
+
+ if (count == goal)
+ break;
+
+ if (dirty)
+ {
+ assert (! dirty->eviction_candidate);
+ assert (dirty->dirty && ! dirty->policy.discardable);
+ assert (! list_node_attached (&dirty->laundry_node));
+
+ next = activity_lru_list_next (dirty);
+
+ object_desc_unmap (dirty);
+
+ dirty->eviction_candidate = true;
+
+ activity_lru_list_unlink (&victim->inactive_dirty, dirty);
+ eviction_list_push (&victim->eviction_dirty, dirty);
+
+ laundry_list_queue (&laundry, dirty);
+
+ laundry_count ++;
+ count ++;
+
+ dirty = next;
+ }
+ }
+ }
+
+ if (count < goal)
+ /* Still hasn't yielded enough. Steal from the active list. */
+ {
+ struct object_desc *desc;
+ struct object_desc *next = activity_lru_list_head (&victim->active);
+
+ while ((desc = next) && count < goal)
+ {
+ assert (! desc->eviction_candidate);
+
+ next = activity_lru_list_next (desc);
+
+ object_desc_unmap (desc);
+
+ desc->eviction_candidate = true;
+
+ activity_lru_list_unlink (&victim->active, desc);
+
+ if (desc->dirty && ! desc->policy.discardable)
+ {
+ if (! list_node_attached (&desc->laundry_node))
+ laundry_list_queue (&laundry, desc);
+
+ eviction_list_queue (&victim->eviction_dirty, desc);
+ laundry_count ++;
+ }
+ else
+ {
+ assert (! list_node_attached (&desc->available_node));
+
+ available_list_queue (&available, desc);
+ eviction_list_queue (&victim->eviction_clean, desc);
+ }
+
+ count ++;
+ }
+ }
+
+ if (count < goal)
+ /* We've cleared the low priority nodes, the inactive LRU lists
+ and the active LRU list. Steal from the high priority
+ lists. */
+ {
+ /* NEXT points to the first high priority object descriptor. */
+
+ while (count < goal && (desc = next))
+ {
+ assert (! desc->eviction_candidate);
+ assert (desc->policy.priority > OBJECT_PRIORITY_LRU);
+
+ next = hurd_btree_priorities_next (desc);
+
+ object_desc_unmap (desc);
+
+ hurd_btree_priorities_detach (&victim->priorities, desc);
+
+ desc->eviction_candidate = true;
+
+ if (desc->dirty && ! desc->policy.discardable)
+ {
+ if (! list_node_attached (&desc->laundry_node))
+ laundry_list_queue (&laundry, desc);
+
+ eviction_list_queue (&victim->eviction_dirty, desc);
+ laundry_count ++;
+ }
+ else
+ {
+ assert (! list_node_attached (&desc->available_node));
+
+ available_list_queue (&available, desc);
+ eviction_list_queue (&victim->eviction_clean, desc);
+ }
+
+ count ++;
+ }
+ }
+
+ debug (0, "Goal: %d, %d in laundry, %d made available",
+ goal, laundry_count, count - laundry_count);
+
+ ss_mutex_unlock (&lru_lock);
+}
diff --git a/viengoos/pager.h b/viengoos/pager.h
new file mode 100644
index 0000000..eb24130
--- /dev/null
+++ b/viengoos/pager.h
@@ -0,0 +1,22 @@
+/* pager.h - Pager interface.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Free memory. */
+extern void pager_collect (void);
diff --git a/viengoos/t-environment.h b/viengoos/t-environment.h
index 6367f52..f49e374 100644
--- a/viengoos/t-environment.h
+++ b/viengoos/t-environment.h
@@ -277,6 +277,12 @@ environment_init (int argc, char *argv[])
while (0)
+/* Some stub functions. */
+void
+pager_collect (void)
+{
+}
+
#include "output.h"
void test (void);
diff --git a/viengoos/viengoos.c b/viengoos/viengoos.c
index c1df482..e0e8ae1 100644
--- a/viengoos/viengoos.c
+++ b/viengoos/viengoos.c
@@ -200,7 +200,8 @@ memory_configure (void)
void
system_task_load (void)
{
- struct hurd_startup_data *startup_data = (void *) memory_frame_allocate ();
+ struct hurd_startup_data *startup_data
+ = (void *) memory_frame_allocate (NULL);
bool boot_strapped = false;
@@ -300,11 +301,17 @@ system_task_load (void)
root_activity = (struct activity *) cap_to_object (root_activity, &rt.cap);
folio_parent (root_activity, folio);
+ object_claim (root_activity, (struct object *) root_activity,
+ OBJECT_POLICY_VOID, true);
+ object_claim (root_activity, (struct object *) folio,
+ OBJECT_POLICY_VOID, true);
+
rt = allocate_object (cap_thread, startup_data->thread);
startup_data->thread = rt.storage;
thread = (struct thread *) cap_to_object (root_activity, &rt.cap);
thread->activity = object_to_cap ((struct object *) root_activity);
+
/* Insert the objects we've allocated so far into TASK's address
space. */
boot_strapped = true;