diff options
author | Richard Braun <rbraun@sceen.net> | 2014-09-23 01:30:01 +0200 |
---|---|---|
committer | Richard Braun <rbraun@sceen.net> | 2014-09-23 01:34:53 +0200 |
commit | e388bf7372e850ab0397f5647b6407dbf4401569 (patch) | |
tree | 457ea297c932a32b0b757e2673b68f9f89824fb8 /kern/sref.c | |
parent | 9817163fb65ef95431eb826985febdc916f10773 (diff) |
kern/sref: fix handling of dirty zero deltas
There is a subtle error in the Refcache pseudocode:
flush():
evict all non-zero local cache entries and clear cache
update the current epoch
Here is a use case demonstrating the issue: take the sref_dirty_zeroes
test module on a machine with two processors. The test code makes
sure the counter is incremented before being decremented through the
use of a condition variable, which also guarantees strong memory
ordering, but these operations can occur on any processor. Flushes
can occur in any order.
EVENT DELTA/0 DELTA/1 GLOBAL
start 0 0 1
inc/0 1 1
dec/1 -1 1
flush/1 0 0
inc/1 1 0
dec/0 0 0
flush/0 0 0
end_of_epoch 0 <- first epoch
inc/0 1 0
dec/1 0 0
flush/1 0 0 <- flushing 0
inc/1 1 0
dec/0 0 0
flush/0 0 0 <- flushing 0
end_of_epoch 0 <- second epoch
Here, deltas are zero on both flushes during the second epoch, leading
the implementation to determine that the counter is a true zero. Fix
the error by unconditionally flushing valid deltas, even if zero,
and actually clearing the cache.
Diffstat (limited to 'kern/sref.c')
-rw-r--r-- | kern/sref.c | 65 |
1 file changed, 34 insertions, 31 deletions
diff --git a/kern/sref.c b/kern/sref.c index 6c84512e..21d6a4e6 100644 --- a/kern/sref.c +++ b/kern/sref.c @@ -113,8 +113,12 @@ struct sref_data { * * Deltas are stored in per-processor caches and added to their global * counter when flushed. A delta is valid if and only if the counter it - * points to isn't NULL. If the value of a delta is not zero, the delta - * must be valid. + * points to isn't NULL. + * + * On cache flush, if a delta is valid, it must be flushed whatever its + * value because a delta can be a dirty zero too. By flushing all valid + * deltas, and clearing them all after a flush, activity on a counter is + * reliably reported. */ struct sref_delta { struct sref_counter *counter; @@ -152,7 +156,7 @@ struct sref_delta { struct sref_cache { struct mutex lock; struct sref_delta deltas[SREF_MAX_DELTAS]; - struct evcnt ev_evict; + struct evcnt ev_collision; struct evcnt ev_flush; struct thread *manager; int registered; @@ -302,8 +306,6 @@ sref_counter_schedule_review(struct sref_counter *counter) static void sref_counter_add(struct sref_counter *counter, unsigned long delta) { - assert(delta != 0); - spinlock_lock(&counter->lock); counter->value += delta; @@ -339,6 +341,13 @@ sref_delta_set_counter(struct sref_delta *delta, struct sref_counter *counter) } static inline void +sref_delta_clear(struct sref_delta *delta) +{ + assert(delta->value == 0); + delta->counter = NULL; +} + +static inline void sref_delta_inc(struct sref_delta *delta) { delta->value++; @@ -356,20 +365,20 @@ sref_delta_is_valid(const struct sref_delta *delta) return (delta->counter != NULL); } -static inline int -sref_delta_is_zero(const struct sref_delta *delta) -{ - return (delta->value == 0); -} - static void sref_delta_flush(struct sref_delta *delta) { - assert(delta->value != 0); sref_counter_add(delta->counter, delta->value); delta->value = 0; } +static void +sref_delta_evict(struct sref_delta *delta) +{ + sref_delta_flush(delta); + sref_delta_clear(delta); +} + static inline 
unsigned long sref_review_queue_size(void) { @@ -437,8 +446,8 @@ sref_cache_init(struct sref_cache *cache, unsigned int cpu) sref_delta_init(delta); } - snprintf(name, sizeof(name), "sref_evict/%u", cpu); - evcnt_register(&cache->ev_evict, name); + snprintf(name, sizeof(name), "sref_collision/%u", cpu); + evcnt_register(&cache->ev_collision, name); snprintf(name, sizeof(name), "sref_flush/%u", cpu); evcnt_register(&cache->ev_flush, name); cache->manager = NULL; @@ -507,16 +516,6 @@ sref_cache_clear_dirty(struct sref_cache *cache) cache->dirty = 0; } -static void -sref_cache_evict(struct sref_cache *cache, struct sref_delta *delta) -{ - if (sref_delta_is_zero(delta)) - return; - - sref_delta_flush(delta); - evcnt_inc(&cache->ev_evict); -} - static struct sref_delta * sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter) { @@ -527,8 +526,9 @@ sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter) if (!sref_delta_is_valid(delta)) sref_delta_set_counter(delta, counter); else if (sref_delta_counter(delta) != counter) { - sref_cache_evict(cache, delta); + sref_delta_flush(delta); sref_delta_set_counter(delta, counter); + evcnt_inc(&cache->ev_collision); } return delta; @@ -540,11 +540,13 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue) struct sref_delta *delta; unsigned int i, cpu; + mutex_lock(&cache->lock); + for (i = 0; i < ARRAY_SIZE(cache->deltas); i++) { delta = sref_cache_delta(cache, i); - if (!sref_delta_is_zero(delta)) - sref_delta_flush(delta); + if (sref_delta_is_valid(delta)) + sref_delta_evict(delta); } cpu = cpu_id(); @@ -570,6 +572,8 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue) sref_cache_clear_dirty(cache); evcnt_inc(&cache->ev_flush); + + mutex_unlock(&cache->lock); } static void @@ -687,10 +691,7 @@ sref_manage(void *arg) cpu_intr_restore(flags); thread_preempt_enable(); - mutex_lock(&cache->lock); sref_cache_flush(cache, &queue); - 
mutex_unlock(&cache->lock); - sref_review(&queue); } @@ -773,8 +774,10 @@ sref_register(void) sref_data.nr_registered_cpus++; if ((sref_data.nr_registered_cpus == 1) - && (sref_data.nr_pending_flushes == 0)) + && (sref_data.nr_pending_flushes == 0)) { + assert(sref_review_queue_empty()); sref_reset_pending_flushes(); + } spinlock_unlock(&sref_data.lock); |