summary refs log tree commit diff
path: root/test
diff options
context:
space:
mode:
author    Richard Braun <rbraun@sceen.net>  2018-06-25 21:48:34 +0200
committer Richard Braun <rbraun@sceen.net>  2018-06-25 21:52:26 +0200
commit    bb91d0a376a71ef2c0d01a741400c367ac1a2ccf (patch)
tree      f7dd37b46d754bd9e3f7acb13957e2ace6a7b43e /test
parent    64d74fe8d76c230e61b17482bb098d7f9729141d (diff)
kern/perfmon: new module
Diffstat (limited to 'test')
-rw-r--r--  test/Kconfig                |  12
-rw-r--r--  test/Makefile               |   3
-rw-r--r--  test/test_perfmon_cpu.c     | 225
-rw-r--r--  test/test_perfmon_thread.c  | 383
-rw-r--r--  test/test_perfmon_torture.c | 346
5 files changed, 969 insertions, 0 deletions
diff --git a/test/Kconfig b/test/Kconfig
index 3f1c3b69..9f0faf44 100644
--- a/test/Kconfig
+++ b/test/Kconfig
@@ -34,6 +34,18 @@ config TEST_MODULE_MUTEX
config TEST_MODULE_MUTEX_PI
bool "mutex_pi"
+config TEST_MODULE_PERFMON_CPU
+ bool "perfmon_cpu"
+ depends on PERFMON
+
+config TEST_MODULE_PERFMON_THREAD
+ bool "perfmon_thread"
+ depends on PERFMON
+
+config TEST_MODULE_PERFMON_TORTURE
+ bool "perfmon_torture"
+ depends on PERFMON
+
config TEST_MODULE_PMAP_UPDATE_MP
bool "pmap_update_mp"
diff --git a/test/Makefile b/test/Makefile
index cdce6130..76edbf0e 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -2,6 +2,9 @@ x15_SOURCES-$(CONFIG_TEST_MODULE_ATOMIC) += test/test_atomic.c
x15_SOURCES-$(CONFIG_TEST_MODULE_BULLETIN) += test/test_bulletin.c
x15_SOURCES-$(CONFIG_TEST_MODULE_MUTEX) += test/test_mutex.c
x15_SOURCES-$(CONFIG_TEST_MODULE_MUTEX_PI) += test/test_mutex_pi.c
+x15_SOURCES-$(CONFIG_TEST_MODULE_PERFMON_CPU) += test/test_perfmon_cpu.c
+x15_SOURCES-$(CONFIG_TEST_MODULE_PERFMON_THREAD) += test/test_perfmon_thread.c
+x15_SOURCES-$(CONFIG_TEST_MODULE_PERFMON_TORTURE) += test/test_perfmon_torture.c
x15_SOURCES-$(CONFIG_TEST_MODULE_PMAP_UPDATE_MP) += test/test_pmap_update_mp.c
x15_SOURCES-$(CONFIG_TEST_MODULE_RCU_DEFER) += test/test_rcu_defer.c
x15_SOURCES-$(CONFIG_TEST_MODULE_SREF_DIRTY_ZEROES) += test/test_sref_dirty_zeroes.c
diff --git a/test/test_perfmon_cpu.c b/test/test_perfmon_cpu.c
new file mode 100644
index 00000000..75f69d3f
--- /dev/null
+++ b/test/test_perfmon_cpu.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2014-2018 Remy Noel.
+ * Copyright (c) 2014-2018 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This test checks the behavior of performance monitoring on a CPU.
+ * It creates a group with two events, cycle and instruction, and attaches
+ * that group to CPU1, where a thread is bound and runs a tight loop to
+ * make sure the target CPU is never idle. After some time, the measurement
+ * stops and values are reported.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <kern/atomic.h>
+#include <kern/clock.h>
+#include <kern/cpumap.h>
+#include <kern/error.h>
+#include <kern/list.h>
+#include <kern/log.h>
+#include <kern/panic.h>
+#include <kern/perfmon.h>
+#include <kern/thread.h>
+#include <machine/cpu.h>
+#include <test/test.h>
+
+#define TEST_WAIT_DELAY_MS 1000
+
+/*
+ * Using another CPU than the BSP as the monitored CPU checks that PMUs are
+ * correctly initialized on APs.
+ */
+#define TEST_CONTROL_CPU 0
+#define TEST_MONITORED_CPU (TEST_CONTROL_CPU + 1)
+#define TEST_MIN_CPUS (TEST_MONITORED_CPU + 1)
+
+#define TEST_EVENT_NAME_MAX_SIZE 32
+
+struct test_event {
+ struct list node;
+ struct perfmon_event pm_event;
+ char name[TEST_EVENT_NAME_MAX_SIZE];
+};
+
+struct test_group {
+ struct list events;
+};
+
+static unsigned int test_run_stop;
+
+static void
+test_wait(void)
+{
+ thread_delay(clock_ticks_from_ms(TEST_WAIT_DELAY_MS), false);
+}
+
+static void
+test_event_init(struct test_event *event, unsigned int id, const char *name)
+{
+ int error;
+
+ error = perfmon_event_init(&event->pm_event, id, PERFMON_EF_KERN);
+ error_check(error, "perfmon_event_init");
+ strlcpy(event->name, name, sizeof(event->name));
+}
+
+static void
+test_event_report(struct test_event *event)
+{
+ uint64_t count;
+ int error;
+
+ count = perfmon_event_read(&event->pm_event);
+ error = (count == 0) ? EINVAL : 0;
+ error_check(error, __func__);
+ log_info("test: %s: %llu", event->name, (unsigned long long)count);
+}
+
+static void
+test_event_attach_cpu(struct test_event *event, unsigned int cpu)
+{
+ int error;
+
+ error = perfmon_event_attach_cpu(&event->pm_event, cpu);
+ error_check(error, "perfmon_event_attach_cpu");
+}
+
+static void
+test_event_detach(struct test_event *event)
+{
+ int error;
+
+ error = perfmon_event_detach(&event->pm_event);
+ error_check(error, "perfmon_event_detach");
+}
+
+static void
+test_group_init(struct test_group *group)
+{
+ list_init(&group->events);
+}
+
+static void
+test_group_add(struct test_group *group, struct test_event *event)
+{
+ list_insert_tail(&group->events, &event->node);
+}
+
+static void
+test_group_attach_cpu(struct test_group *group, unsigned int cpu)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_attach_cpu(event, cpu);
+ }
+}
+
+static void
+test_group_detach(struct test_group *group)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_detach(event);
+ }
+}
+
+static void
+test_group_report(struct test_group *group)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_report(event);
+ }
+}
+
+static void
+test_run(void *arg)
+{
+ unsigned int stop;
+
+ (void)arg;
+
+ do {
+ stop = atomic_load(&test_run_stop, ATOMIC_RELAXED);
+ } while (!stop);
+}
+
+static void
+test_control(void *arg)
+{
+ struct test_event cycle, instruction;
+ struct test_group group;
+ struct thread *thread;
+
+ thread = arg;
+
+ test_event_init(&cycle, PERFMON_EV_CYCLE, "cycle");
+ test_event_init(&instruction, PERFMON_EV_INSTRUCTION, "instruction");
+ test_group_init(&group);
+ test_group_add(&group, &cycle);
+ test_group_add(&group, &instruction);
+ test_group_attach_cpu(&group, TEST_MONITORED_CPU);
+ test_wait();
+ test_group_report(&group);
+ test_wait();
+ test_group_detach(&group);
+ test_group_report(&group);
+
+ atomic_store(&test_run_stop, 1, ATOMIC_RELAXED);
+ thread_join(thread);
+ log_info("test: done");
+}
+
+void
+test_setup(void)
+{
+ struct thread *thread;
+ struct thread_attr attr;
+ struct cpumap *cpumap;
+ int error;
+
+ if (cpu_count() < TEST_MIN_CPUS) {
+ panic("test: %u processors required", TEST_MIN_CPUS);
+ }
+
+ error = cpumap_create(&cpumap);
+ error_check(error, "cpumap_create");
+
+ thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_run");
+ cpumap_zero(cpumap);
+ cpumap_set(cpumap, TEST_MONITORED_CPU);
+ thread_attr_set_cpumap(&attr, cpumap);
+ error = thread_create(&thread, &attr, test_run, NULL);
+ error_check(error, "thread_create");
+
+ thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_control");
+ thread_attr_set_detached(&attr);
+ cpumap_zero(cpumap);
+ cpumap_set(cpumap, TEST_CONTROL_CPU);
+ thread_attr_set_cpumap(&attr, cpumap);
+ error = thread_create(NULL, &attr, test_control, thread);
+ error_check(error, "thread_create");
+
+ cpumap_destroy(cpumap);
+}
diff --git a/test/test_perfmon_thread.c b/test/test_perfmon_thread.c
new file mode 100644
index 00000000..0213777b
--- /dev/null
+++ b/test/test_perfmon_thread.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2014-2018 Remy Noel.
+ * Copyright (c) 2014-2018 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This test checks the behavior of performance monitoring on a thread.
+ * It creates a group with a single event, cycle, and attaches that group to
+ * a runner thread. Two checks are then performed :
+ * - the first makes sure the number of cycles changes when the runner
+ * thread is running
+ * - the second makes sure the number of cycles doesn't change when the
+ * runner thread is sleeping
+ *
+ * Another group with a cycle event is created and attached to CPU0 to make
+ * sure that a shared event is correctly handled, and the runner thread is
+ * bound to CPU0 to force sharing. A third thread is created to fill CPU0
+ * time with cycles so that the cycle counter of the CPU-attached group
+ * changes while the runner thread is sleeping.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <kern/atomic.h>
+#include <kern/clock.h>
+#include <kern/condition.h>
+#include <kern/cpumap.h>
+#include <kern/error.h>
+#include <kern/kmem.h>
+#include <kern/list.h>
+#include <kern/log.h>
+#include <kern/mutex.h>
+#include <kern/panic.h>
+#include <kern/perfmon.h>
+#include <kern/thread.h>
+#include <test/test.h>
+
+#define TEST_WAIT_DELAY_MS 1000
+
+#define TEST_EVENT_NAME_MAX_SIZE 32
+
+struct test_event {
+ struct list node;
+ struct perfmon_event pm_event;
+ uint64_t last_value;
+ char name[TEST_EVENT_NAME_MAX_SIZE];
+};
+
+struct test_group {
+ struct list events;
+};
+
+enum test_state {
+ TEST_STATE_RUNNING,
+ TEST_STATE_SUSPENDED,
+ TEST_STATE_TERMINATED,
+};
+
+static struct condition test_condition;
+static struct mutex test_mutex;
+static enum test_state test_state;
+
+static void
+test_wait(void)
+{
+ log_info("test: controller waiting");
+ thread_delay(clock_ticks_from_ms(TEST_WAIT_DELAY_MS), false);
+ log_info("test: controller resuming");
+}
+
+static void
+test_event_init(struct test_event *event, unsigned int id, const char *name)
+{
+ int error;
+
+ error = perfmon_event_init(&event->pm_event, id, PERFMON_EF_KERN);
+ error_check(error, "perfmon_event_init");
+ strlcpy(event->name, name, sizeof(event->name));
+}
+
+static void
+test_event_attach(struct test_event *event, struct thread *thread)
+{
+ int error;
+
+ error = perfmon_event_attach(&event->pm_event, thread);
+ error_check(error, "perfmon_event_attach");
+}
+
+static void
+test_event_attach_cpu(struct test_event *event, unsigned int cpu)
+{
+ int error;
+
+ error = perfmon_event_attach_cpu(&event->pm_event, cpu);
+ error_check(error, "perfmon_event_attach_cpu");
+}
+
+static void
+test_event_detach(struct test_event *event)
+{
+ int error;
+
+ error = perfmon_event_detach(&event->pm_event);
+ error_check(error, "perfmon_event_detach");
+}
+
+static uint64_t
+test_event_read(struct test_event *event)
+{
+ uint64_t value;
+
+ value = perfmon_event_read(&event->pm_event);
+ log_info("test: %s: %llu", event->name, (unsigned long long)value);
+ return value;
+}
+
+static void
+test_event_save(struct test_event *event)
+{
+ event->last_value = test_event_read(event);
+}
+
+static void
+test_event_check(struct test_event *event, bool change_expected)
+{
+ uint64_t value;
+ bool changed;
+
+ value = test_event_read(event);
+ changed = (value != event->last_value);
+
+ if (changed != change_expected) {
+ panic("test: invalid value");
+ }
+
+ event->last_value = value;
+}
+
+static void
+test_group_init(struct test_group *group)
+{
+ list_init(&group->events);
+}
+
+static void
+test_group_add(struct test_group *group, struct test_event *event)
+{
+ list_insert_tail(&group->events, &event->node);
+}
+
+static void
+test_group_attach(struct test_group *group, struct thread *thread)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_attach(event, thread);
+ }
+}
+
+static void
+test_group_attach_cpu(struct test_group *group, unsigned int cpu)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_attach_cpu(event, cpu);
+ }
+}
+
+static void
+test_group_detach(struct test_group *group)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_detach(event);
+ }
+}
+
+static void
+test_group_save(struct test_group *group)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_save(event);
+ }
+}
+
+static void
+test_group_check(struct test_group *group, bool change_expected)
+{
+ struct test_event *event;
+
+ list_for_each_entry(&group->events, event, node) {
+ test_event_check(event, change_expected);
+ }
+}
+
+static void
+test_run(void *arg)
+{
+ bool report;
+
+ (void)arg;
+
+ report = true;
+
+ mutex_lock(&test_mutex);
+
+ while (test_state != TEST_STATE_TERMINATED) {
+ if (test_state == TEST_STATE_SUSPENDED) {
+ log_info("test: runner suspended");
+ report = true;
+ condition_wait(&test_condition, &test_mutex);
+ } else {
+ mutex_unlock(&test_mutex);
+
+ if (report) {
+ log_info("test: runner running");
+ report = false;
+ }
+
+ mutex_lock(&test_mutex);
+ }
+ }
+
+ mutex_unlock(&test_mutex);
+}
+
+static void
+test_fill(void *arg)
+{
+ enum test_state state;
+
+ (void)arg;
+
+ do {
+ state = atomic_load(&test_state, ATOMIC_RELAXED);
+ } while (state != TEST_STATE_TERMINATED);
+}
+
+static void
+test_wait_state(const struct thread *thread, unsigned short state)
+{
+ for (;;) {
+ if (thread_state(thread) == state) {
+ break;
+ }
+
+ thread_delay(1, false);
+ }
+}
+
+static void
+test_resume(struct thread *thread)
+{
+ test_wait_state(thread, THREAD_SLEEPING);
+
+ mutex_lock(&test_mutex);
+ assert(test_state == TEST_STATE_SUSPENDED);
+ atomic_store(&test_state, TEST_STATE_RUNNING, ATOMIC_RELAXED);
+ condition_signal(&test_condition);
+ mutex_unlock(&test_mutex);
+
+ test_wait_state(thread, THREAD_RUNNING);
+}
+
+static void
+test_suspend(struct thread *thread)
+{
+ test_wait_state(thread, THREAD_RUNNING);
+
+ mutex_lock(&test_mutex);
+ assert(test_state == TEST_STATE_RUNNING);
+ atomic_store(&test_state, TEST_STATE_SUSPENDED, ATOMIC_RELAXED);
+ mutex_unlock(&test_mutex);
+
+ test_wait_state(thread, THREAD_SLEEPING);
+}
+
+static void
+test_terminate(void)
+{
+ mutex_lock(&test_mutex);
+ test_state = TEST_STATE_TERMINATED;
+ condition_signal(&test_condition);
+ mutex_unlock(&test_mutex);
+}
+
+static void
+test_control(void *arg)
+{
+ struct test_event thread_cycle, cpu_cycle;
+ struct test_group thread_group, cpu_group;
+ struct thread *runner;
+
+ runner = arg;
+
+ test_event_init(&thread_cycle, PERFMON_EV_CYCLE, "thread_cycle");
+ test_group_init(&thread_group);
+ test_group_add(&thread_group, &thread_cycle);
+
+ test_event_init(&cpu_cycle, PERFMON_EV_CYCLE, "cpu_cycle");
+ test_group_init(&cpu_group);
+ test_group_add(&cpu_group, &cpu_cycle);
+
+ test_group_attach(&thread_group, runner);
+ test_group_attach_cpu(&cpu_group, 0);
+
+ test_group_save(&thread_group);
+ test_group_save(&cpu_group);
+ test_resume(runner);
+ test_wait();
+ test_suspend(runner);
+ test_group_check(&thread_group, true);
+ test_group_check(&cpu_group, true);
+ test_wait();
+ test_group_check(&thread_group, false);
+ test_group_check(&cpu_group, true);
+ test_terminate();
+
+ test_group_detach(&cpu_group);
+ test_group_detach(&thread_group);
+
+ thread_join(runner);
+ log_info("test: done");
+}
+
+void
+test_setup(void)
+{
+ struct thread_attr attr;
+ struct thread *runner;
+ struct cpumap *cpumap;
+ int error;
+
+ condition_init(&test_condition);
+ mutex_init(&test_mutex);
+ test_state = TEST_STATE_SUSPENDED;
+
+ error = cpumap_create(&cpumap);
+ error_check(error, "cpumap_create");
+
+ cpumap_zero(cpumap);
+ cpumap_set(cpumap, 0);
+
+ thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_run");
+ thread_attr_set_cpumap(&attr, cpumap);
+ error = thread_create(&runner, &attr, test_run, NULL);
+ error_check(error, "thread_create");
+
+ thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_fill");
+ thread_attr_set_detached(&attr);
+ thread_attr_set_cpumap(&attr, cpumap);
+ thread_attr_set_priority(&attr, THREAD_SCHED_FS_PRIO_MIN);
+ error = thread_create(NULL, &attr, test_fill, NULL);
+ error_check(error, "thread_create");
+
+ thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_control");
+ thread_attr_set_detached(&attr);
+ error = thread_create(NULL, &attr, test_control, runner);
+ error_check(error, "thread_create");
+
+ cpumap_destroy(cpumap);
+}
diff --git a/test/test_perfmon_torture.c b/test/test_perfmon_torture.c
new file mode 100644
index 00000000..171cb99c
--- /dev/null
+++ b/test/test_perfmon_torture.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2014-2018 Remy Noel.
+ * Copyright (c) 2014-2018 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This module is a stress test, expected to never terminate, of the
+ * performance monitoring module. It creates a control thread which
+ * maintains a couple of test threads running while toggling performance
+ * monitoring on them, attempting to produce many regular and corner
+ * cases. In particular, the thread pool is randomly resized by destroying
+ * and creating the underlying kernel threads.
+ *
+ * The control thread regularly prints some stats about the thread pool
+ * and the associated performance monitoring events to report that it's
+ * making progress.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include <kern/atomic.h>
+#include <kern/clock.h>
+#include <kern/error.h>
+#include <kern/kmem.h>
+#include <kern/log.h>
+#include <kern/panic.h>
+#include <kern/perfmon.h>
+#include <kern/thread.h>
+#include <test/test.h>
+
+struct test_thread {
+ unsigned int id;
+ struct thread *thread;
+ struct perfmon_event event;
+ unsigned int must_stop;
+ bool monitored;
+ unsigned long long count;
+};
+
+struct test_controller {
+ struct test_thread **threads;
+ unsigned int nr_threads;
+ unsigned int monitoring_lid;
+ unsigned int state_lid;
+ unsigned long nr_current_events;
+ unsigned long nr_total_events;
+ unsigned long nr_current_threads;
+ unsigned long nr_total_threads;
+};
+
+#define TEST_WAIT_DELAY_MS 100
+#define TEST_LOOPS_PER_PRINT 20
+
+#define TEST_MONITORING_SEED 12345
+#define TEST_STATE_SEED 23456
+
+static void
+test_wait(void)
+{
+ thread_delay(clock_ticks_from_ms(TEST_WAIT_DELAY_MS), false);
+}
+
+static unsigned int
+test_rand(unsigned int x)
+{
+ /* Basic 32-bit xorshift PRNG */
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ return x;
+}
+
+static bool
+test_thread_monitored(const struct test_thread *thread)
+{
+ return thread->monitored;
+}
+
+static void
+test_thread_start_monitoring(struct test_thread *thread)
+{
+ int error;
+
+ error = perfmon_event_attach(&thread->event, thread->thread);
+ error_check(error, __func__);
+ thread->monitored = true;
+}
+
+static void
+test_thread_stop_monitoring(struct test_thread *thread)
+{
+ int error;
+
+ thread->count += perfmon_event_read(&thread->event);
+ error = perfmon_event_detach(&thread->event);
+ error_check(error, __func__);
+ thread->monitored = false;
+}
+
+static void
+test_thread_report(const struct test_thread *thread)
+{
+ log_info("test: thread:%u count:%llu", thread->id, thread->count);
+}
+
+static void
+test_run(void *arg)
+{
+ struct test_thread *thread;
+
+ thread = arg;
+
+ for (;;) {
+ if (atomic_load(&thread->must_stop, ATOMIC_RELAXED)) {
+ break;
+ }
+ }
+}
+
+static bool
+test_thread_started(const struct test_thread *thread)
+{
+ return thread->thread;
+}
+
+static void
+test_thread_start(struct test_thread *thread)
+{
+ char name[THREAD_NAME_SIZE];
+ struct thread_attr attr;
+ int error;
+
+ assert(!thread->monitored);
+
+ if (test_thread_started(thread)) {
+ return;
+ }
+
+ thread->must_stop = 0;
+
+ snprintf(name, sizeof(name),
+ THREAD_KERNEL_PREFIX "test_run:%u", thread->id);
+ thread_attr_init(&attr, name);
+ error = thread_create(&thread->thread, &attr, test_run, thread);
+ error_check(error, "thread_create");
+}
+
+static void
+test_thread_request_stop(struct test_thread *thread)
+{
+ atomic_store(&thread->must_stop, 1, ATOMIC_RELAXED);
+}
+
+static void
+test_thread_join(struct test_thread *thread)
+{
+ assert(test_thread_started(thread));
+ assert(!test_thread_monitored(thread));
+
+ thread_join(thread->thread);
+ thread->thread = NULL;
+}
+
+static struct test_thread *
+test_thread_create(unsigned int id)
+{
+ struct test_thread *thread;
+
+ thread = kmem_alloc(sizeof(*thread));
+
+ if (thread == NULL) {
+ panic("thread allocation failed");
+ }
+
+ thread->id = id;
+ thread->thread = NULL;
+ thread->must_stop = 0;
+ thread->monitored = false;
+ thread->count = 0;
+
+ perfmon_event_init(&thread->event, PERFMON_EV_CYCLE, PERFMON_EF_KERN);
+ test_thread_start(thread);
+
+ return thread;
+}
+
+static struct test_thread *
+test_controller_get(struct test_controller *controller, unsigned int id)
+{
+ assert(id < controller->nr_threads);
+ return controller->threads[id];
+}
+
+static struct test_thread *
+test_controller_get_by_lid(struct test_controller *controller, unsigned int lid)
+{
+ return test_controller_get(controller, lid % controller->nr_threads);
+}
+
+static void
+test_toggle_monitoring(struct test_controller *controller,
+ struct test_thread *thread)
+{
+ if (!test_thread_started(thread)) {
+ return;
+ }
+
+ if (thread->monitored) {
+ test_thread_stop_monitoring(thread);
+ controller->nr_current_events--;
+ } else {
+ test_thread_start_monitoring(thread);
+ controller->nr_total_events++;
+ controller->nr_current_events++;
+ }
+}
+
+static void
+test_toggle_state(struct test_controller *controller,
+ struct test_thread *thread)
+{
+ if (test_thread_started(thread)) {
+ /*
+ * Make the thread stop asynchronously with monitoring to test
+ * thread referencing.
+ */
+ test_thread_request_stop(thread);
+
+ if (test_thread_monitored(thread)) {
+ test_thread_stop_monitoring(thread);
+ controller->nr_current_events--;
+ }
+
+ test_thread_join(thread);
+ controller->nr_current_threads--;
+ } else {
+ test_thread_start(thread);
+ controller->nr_total_threads++;
+ controller->nr_current_threads++;
+ }
+}
+
+static void
+test_controller_report(struct test_controller *controller)
+{
+ log_info("test: events:%lu total:%lu threads:%lu total:%lu",
+ controller->nr_current_events, controller->nr_total_events,
+ controller->nr_current_threads, controller->nr_total_threads);
+
+ for (unsigned int i = 0; i < controller->nr_threads; i++) {
+ test_thread_report(test_controller_get(controller, i));
+ }
+}
+
+static void
+test_control(void *arg)
+{
+ struct test_controller *controller;
+ struct test_thread *thread;
+
+ controller = arg;
+
+ log_info("test: %u threads", controller->nr_threads);
+
+ for (unsigned long nr_loops = 1; /* no condition */; nr_loops++) {
+ controller->monitoring_lid = test_rand(controller->monitoring_lid);
+ thread = test_controller_get_by_lid(controller,
+ controller->monitoring_lid);
+ test_toggle_monitoring(controller, thread);
+
+ controller->state_lid = test_rand(controller->state_lid);
+ thread = test_controller_get_by_lid(controller,
+ controller->state_lid);
+ test_toggle_state(controller, thread);
+
+ test_wait();
+
+ if ((nr_loops % TEST_LOOPS_PER_PRINT) == 0) {
+ test_controller_report(controller);
+ }
+ }
+}
+
+static void
+test_controller_create(void)
+{
+ struct test_controller *controller;
+ struct thread_attr attr;
+ int error;
+
+ controller = kmem_alloc(sizeof(*controller));
+
+ if (!controller) {
+ panic("test: unable to create controller");
+ }
+
+ /*
+ * At least two threads are required by the monitoring/state toggling
+ * operations, otherwise they always apply to the same thread, severely
+ * restricting their usefulness.
+ */
+ controller->nr_threads = MAX(cpu_count() - 1, 2);
+ controller->threads = kmem_alloc(controller->nr_threads
+ * sizeof(*controller->threads));
+
+ if (!controller->threads) {
+ panic("test: unable to allocate thread array");
+ }
+
+ for (unsigned int i = 0; i < controller->nr_threads; i++) {
+ controller->threads[i] = test_thread_create(i);
+ }
+
+ controller->monitoring_lid = TEST_MONITORING_SEED;
+ controller->state_lid = TEST_STATE_SEED;
+ controller->nr_current_events = 0;
+ controller->nr_total_events = 0;
+ controller->nr_current_threads = controller->nr_threads;
+ controller->nr_total_threads = controller->nr_threads;
+
+ thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_control");
+ thread_attr_set_detached(&attr);
+ error = thread_create(NULL, &attr, test_control, controller);
+ error_check(error, "thread_create");
+}
+
+void
+test_setup(void)
+{
+ test_controller_create();
+}