-rw-r--r--  arch/x86/machine/pmap.c  32
-rw-r--r--  kern/llsync.c            18
-rw-r--r--  kern/llsync_i.h           6
-rw-r--r--  kern/sref.c              40
-rw-r--r--  kern/syscnt.c             2
-rw-r--r--  kern/thread.c            16
-rw-r--r--  kern/work.c               8
7 files changed, 61 insertions, 61 deletions
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index eee6d51f..b11587d0 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -221,10 +221,10 @@ struct pmap_update_queue {
struct pmap_syncer {
struct thread *thread;
struct pmap_update_queue queue;
- struct syscnt sc_update;
- struct syscnt sc_update_enter;
- struct syscnt sc_update_remove;
- struct syscnt sc_update_protect;
+ struct syscnt sc_updates;
+ struct syscnt sc_update_enters;
+ struct syscnt sc_update_removes;
+ struct syscnt sc_update_protects;
} __aligned(CPU_L1_SIZE);
static void pmap_sync(void *arg);
@@ -816,14 +816,14 @@ pmap_syncer_init(struct pmap_syncer *syncer, unsigned int cpu)
queue = &syncer->queue;
spinlock_init(&queue->lock);
list_init(&queue->requests);
- snprintf(name, sizeof(name), "pmap_update/%u", cpu);
- syscnt_register(&syncer->sc_update, name);
- snprintf(name, sizeof(name), "pmap_update_enter/%u", cpu);
- syscnt_register(&syncer->sc_update_enter, name);
- snprintf(name, sizeof(name), "pmap_update_remove/%u", cpu);
- syscnt_register(&syncer->sc_update_remove, name);
- snprintf(name, sizeof(name), "pmap_update_protect/%u", cpu);
- syscnt_register(&syncer->sc_update_protect, name);
+ snprintf(name, sizeof(name), "pmap_updates/%u", cpu);
+ syscnt_register(&syncer->sc_updates, name);
+ snprintf(name, sizeof(name), "pmap_update_enters/%u", cpu);
+ syscnt_register(&syncer->sc_update_enters, name);
+ snprintf(name, sizeof(name), "pmap_update_removes/%u", cpu);
+ syscnt_register(&syncer->sc_update_removes, name);
+ snprintf(name, sizeof(name), "pmap_update_protects/%u", cpu);
+ syscnt_register(&syncer->sc_update_protects, name);
}
void __init
@@ -1425,7 +1425,7 @@ pmap_update_local(const struct pmap_update_oplist *oplist,
unsigned int i;
syncer = cpu_local_ptr(pmap_syncer);
- syscnt_inc(&syncer->sc_update);
+ syscnt_inc(&syncer->sc_updates);
global_tlb_flush = (nr_mappings > PMAP_UPDATE_MAX_MAPPINGS);
error = 0;
@@ -1438,17 +1438,17 @@ pmap_update_local(const struct pmap_update_oplist *oplist,
switch (op->operation) {
case PMAP_UPDATE_OP_ENTER:
- syscnt_inc(&syncer->sc_update_enter);
+ syscnt_inc(&syncer->sc_update_enters);
error = pmap_update_enter(oplist->pmap, !global_tlb_flush,
&op->enter_args);
break;
case PMAP_UPDATE_OP_REMOVE:
- syscnt_inc(&syncer->sc_update_remove);
+ syscnt_inc(&syncer->sc_update_removes);
pmap_update_remove(oplist->pmap, !global_tlb_flush,
&op->remove_args);
break;
case PMAP_UPDATE_OP_PROTECT:
- syscnt_inc(&syncer->sc_update_protect);
+ syscnt_inc(&syncer->sc_update_protects);
pmap_update_protect(oplist->pmap, !global_tlb_flush,
&op->protect_args);
break;
diff --git a/kern/llsync.c b/kern/llsync.c
index 0cee270b..023548a9 100644
--- a/kern/llsync.c
+++ b/kern/llsync.c
@@ -92,12 +92,12 @@ llsync_setup(void)
spinlock_init(&llsync_data.lock);
work_queue_init(&llsync_data.queue0);
work_queue_init(&llsync_data.queue1);
- syscnt_register(&llsync_data.sc_global_checkpoint,
- "llsync_global_checkpoint");
- syscnt_register(&llsync_data.sc_periodic_checkin,
- "llsync_periodic_checkin");
- syscnt_register(&llsync_data.sc_failed_periodic_checkin,
- "llsync_failed_periodic_checkin");
+ syscnt_register(&llsync_data.sc_global_checkpoints,
+ "llsync_global_checkpoints");
+ syscnt_register(&llsync_data.sc_periodic_checkins,
+ "llsync_periodic_checkins");
+ syscnt_register(&llsync_data.sc_failed_periodic_checkins,
+ "llsync_failed_periodic_checkins");
llsync_data.gcid.value = LLSYNC_INITIAL_GCID;
for (i = 0; i < cpu_count(); i++) {
@@ -143,7 +143,7 @@ llsync_process_global_checkpoint(void)
}
llsync_data.gcid.value++;
- syscnt_inc(&llsync_data.sc_global_checkpoint);
+ syscnt_inc(&llsync_data.sc_global_checkpoints);
}
static void
@@ -273,10 +273,10 @@ llsync_report_periodic_event(void)
llsync_commit_checkpoint(cpu_id());
} else {
if (thread_llsync_in_read_cs()) {
- syscnt_inc(&llsync_data.sc_failed_periodic_checkin);
+ syscnt_inc(&llsync_data.sc_failed_periodic_checkins);
} else {
cpu_data->gcid = gcid;
- syscnt_inc(&llsync_data.sc_periodic_checkin);
+ syscnt_inc(&llsync_data.sc_periodic_checkins);
llsync_commit_checkpoint(cpu_id());
}
}
diff --git a/kern/llsync_i.h b/kern/llsync_i.h
index 10dec944..6bc1bf1d 100644
--- a/kern/llsync_i.h
+++ b/kern/llsync_i.h
@@ -45,9 +45,9 @@ struct llsync_data {
int no_warning;
struct work_queue queue0;
struct work_queue queue1;
- struct syscnt sc_global_checkpoint;
- struct syscnt sc_periodic_checkin;
- struct syscnt sc_failed_periodic_checkin;
+ struct syscnt sc_global_checkpoints;
+ struct syscnt sc_periodic_checkins;
+ struct syscnt sc_failed_periodic_checkins;
/*
* Global checkpoint ID.
diff --git a/kern/sref.c b/kern/sref.c
index 59d07da7..4ae6ae16 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -104,10 +104,10 @@ struct sref_data {
unsigned int nr_pending_flushes;
struct sref_queue queue0;
struct sref_queue queue1;
- struct syscnt sc_epoch;
- struct syscnt sc_dirty_zero;
- struct syscnt sc_revive;
- struct syscnt sc_true_zero;
+ struct syscnt sc_epochs;
+ struct syscnt sc_dirty_zeroes;
+ struct syscnt sc_revives;
+ struct syscnt sc_true_zeroes;
int no_warning;
};
@@ -159,8 +159,8 @@ struct sref_delta {
struct sref_cache {
struct mutex lock;
struct sref_delta deltas[SREF_MAX_DELTAS];
- struct syscnt sc_collision;
- struct syscnt sc_flush;
+ struct syscnt sc_collisions;
+ struct syscnt sc_flushes;
struct thread *manager;
int registered;
int dirty;
@@ -514,7 +514,7 @@ sref_end_epoch(struct sref_queue *queue)
sref_queue_transfer(queue, &sref_data.queue1);
sref_queue_transfer(&sref_data.queue1, &sref_data.queue0);
sref_queue_init(&sref_data.queue0);
- syscnt_inc(&sref_data.sc_epoch);
+ syscnt_inc(&sref_data.sc_epochs);
sref_reset_pending_flushes();
}
@@ -539,10 +539,10 @@ sref_cache_init(struct sref_cache *cache, unsigned int cpu)
sref_delta_init(delta);
}
- snprintf(name, sizeof(name), "sref_collision/%u", cpu);
- syscnt_register(&cache->sc_collision, name);
- snprintf(name, sizeof(name), "sref_flush/%u", cpu);
- syscnt_register(&cache->sc_flush, name);
+ snprintf(name, sizeof(name), "sref_collisions/%u", cpu);
+ syscnt_register(&cache->sc_collisions, name);
+ snprintf(name, sizeof(name), "sref_flushes/%u", cpu);
+ syscnt_register(&cache->sc_flushes, name);
cache->manager = NULL;
cache->registered = 0;
cache->dirty = 0;
@@ -621,7 +621,7 @@ sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter)
} else if (sref_delta_counter(delta) != counter) {
sref_delta_flush(delta);
sref_delta_set_counter(delta, counter);
- syscnt_inc(&cache->sc_collision);
+ syscnt_inc(&cache->sc_collisions);
}
return delta;
@@ -667,7 +667,7 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue)
spinlock_unlock(&sref_data.lock);
sref_cache_clear_dirty(cache);
- syscnt_inc(&cache->sc_flush);
+ syscnt_inc(&cache->sc_flushes);
mutex_unlock(&cache->lock);
}
@@ -782,9 +782,9 @@ sref_review(struct sref_queue *queue)
if ((nr_dirty + nr_revive + nr_true) != 0) {
spinlock_lock(&sref_data.lock);
- syscnt_add(&sref_data.sc_dirty_zero, nr_dirty);
- syscnt_add(&sref_data.sc_revive, nr_revive);
- syscnt_add(&sref_data.sc_true_zero, nr_true);
+ syscnt_add(&sref_data.sc_dirty_zeroes, nr_dirty);
+ syscnt_add(&sref_data.sc_revives, nr_revive);
+ syscnt_add(&sref_data.sc_true_zeroes, nr_true);
spinlock_unlock(&sref_data.lock);
}
}
@@ -822,10 +822,10 @@ sref_bootstrap(void)
spinlock_init(&sref_data.lock);
sref_queue_init(&sref_data.queue0);
sref_queue_init(&sref_data.queue1);
- syscnt_register(&sref_data.sc_epoch, "sref_epoch");
- syscnt_register(&sref_data.sc_dirty_zero, "sref_dirty_zero");
- syscnt_register(&sref_data.sc_revive, "sref_revive");
- syscnt_register(&sref_data.sc_true_zero, "sref_true_zero");
+ syscnt_register(&sref_data.sc_epochs, "sref_epochs");
+ syscnt_register(&sref_data.sc_dirty_zeroes, "sref_dirty_zeroes");
+ syscnt_register(&sref_data.sc_revives, "sref_revives");
+ syscnt_register(&sref_data.sc_true_zeroes, "sref_true_zeroes");
sref_cache_init(sref_cache_get(), 0);
}
diff --git a/kern/syscnt.c b/kern/syscnt.c
index 907f226b..5c4c3608 100644
--- a/kern/syscnt.c
+++ b/kern/syscnt.c
@@ -60,7 +60,7 @@ syscnt_info(const char *prefix)
prefix_length = (prefix == NULL) ? 0 : strlen(prefix);
- printk("syscnt: name count\n");
+ printk("syscnt: name value\n");
mutex_lock(&syscnt_lock);
diff --git a/kern/thread.c b/kern/thread.c
index a27861f3..416be5eb 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -254,8 +254,8 @@ struct thread_runq {
/* Ticks before the next balancing attempt when a run queue is idle */
unsigned int idle_balance_ticks;
- struct syscnt sc_schedule_intr;
- struct syscnt sc_tick_intr;
+ struct syscnt sc_schedule_intrs;
+ struct syscnt sc_tick_intrs;
struct syscnt sc_boosts;
} __aligned(CPU_L1_SIZE);
@@ -451,10 +451,10 @@ thread_runq_init(struct thread_runq *runq, unsigned int cpu,
runq->balancer = NULL;
runq->idler = NULL;
runq->idle_balance_ticks = (unsigned int)-1;
- snprintf(name, sizeof(name), "thread_schedule_intr/%u", cpu);
- syscnt_register(&runq->sc_schedule_intr, name);
- snprintf(name, sizeof(name), "thread_tick_intr/%u", cpu);
- syscnt_register(&runq->sc_tick_intr, name);
+ snprintf(name, sizeof(name), "thread_schedule_intrs/%u", cpu);
+ syscnt_register(&runq->sc_schedule_intrs, name);
+ snprintf(name, sizeof(name), "thread_tick_intrs/%u", cpu);
+ syscnt_register(&runq->sc_tick_intrs, name);
snprintf(name, sizeof(name), "thread_boosts/%u", cpu);
syscnt_register(&runq->sc_boosts, name);
}
@@ -2414,7 +2414,7 @@ thread_schedule_intr(void)
assert(!thread_preempt_enabled());
runq = thread_runq_local();
- syscnt_inc(&runq->sc_schedule_intr);
+ syscnt_inc(&runq->sc_schedule_intrs);
}
void
@@ -2428,7 +2428,7 @@ thread_tick_intr(void)
assert(!thread_preempt_enabled());
runq = thread_runq_local();
- syscnt_inc(&runq->sc_tick_intr);
+ syscnt_inc(&runq->sc_tick_intrs);
llsync_report_periodic_event();
sref_report_periodic_event();
work_report_periodic_event();
diff --git a/kern/work.c b/kern/work.c
index 3783df42..f6309e79 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -91,7 +91,7 @@ struct work_pool {
struct work_queue queue0;
struct work_queue queue1;
struct work_thread *manager;
- struct syscnt sc_transfer;
+ struct syscnt sc_transfers;
unsigned int cpu;
unsigned int max_threads;
unsigned int nr_threads;
@@ -173,8 +173,8 @@ work_pool_init(struct work_pool *pool, unsigned int cpu, int flags)
} else {
nr_cpus = 1;
suffix = (flags & WORK_PF_HIGHPRIO) ? "h" : "";
- snprintf(name, sizeof(name), "work_transfer/%u%s", cpu, suffix);
- syscnt_register(&pool->sc_transfer, name);
+ snprintf(name, sizeof(name), "work_transfers/%u%s", cpu, suffix);
+ syscnt_register(&pool->sc_transfers, name);
pool->cpu = cpu;
}
@@ -275,7 +275,7 @@ work_pool_shift_queues(struct work_pool *pool, struct work_queue *old_queue)
work_queue_init(&pool->queue0);
if (work_queue_nr_works(old_queue) != 0) {
- syscnt_inc(&pool->sc_transfer);
+ syscnt_inc(&pool->sc_transfers);
}
}