author	Paul E. McKenney <paul.mckenney@linaro.org>	2011-11-29 15:57:13 -0800
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 10:32:09 -0800
commit	dff1672d9199fffddb58fa7970ccf59005fc35f3 (patch)
tree	a9740f65dfc203d183cb06ce0faeed0de5cef58b
parent	facc4e159672b4ed10aa18147bfa187b013c9505 (diff)
rcu: Keep invoking callbacks if CPU otherwise idle
The rcu_do_batch() function that invokes callbacks for TREE_RCU and TREE_PREEMPT_RCU normally throttles callback invocation to avoid degrading scheduling latency. However, as long as the CPU would otherwise be idle, there is no downside to continuing to invoke any callbacks that have passed through their grace periods. In fact, processing such callbacks in a timely manner has the benefit of increasing the probability that the CPU can enter the power-saving dyntick-idle mode.

Therefore, this commit allows callback invocation to continue beyond the preset limit as long as the scheduler does not have some other task to run and as long as context is that of the idle task or the relevant RCU kthread.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--	kernel/rcutree.c	5
-rw-r--r--	kernel/rcutree.h	1
-rw-r--r--	kernel/rcutree_plugin.h	14
3 files changed, 19 insertions(+), 1 deletion(-)
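Before reading the diff, the shape of the new loop-termination logic may be easier to see in isolation. The following self-contained userspace C sketch is illustrative only: should_stop() and the three boolean stubs are hypothetical stand-ins for the kernel's need_resched(), is_idle_task(current), and rcu_is_callbacks_kthread() predicates; nothing here is kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel predicates. */
static bool need_resched_stub;         /* scheduler has another runnable task */
static bool is_idle_task_stub;         /* currently running the idle task */
static bool is_callbacks_kthread_stub; /* currently the per-CPU RCU kthread */

/* Mirror of the patched stop condition in rcu_do_batch(): stop only if
 * the batch limit is reached AND the CPU has something better to do. */
static bool should_stop(int count, int bl)
{
	return count >= bl &&
	       (need_resched_stub ||
		(!is_idle_task_stub && !is_callbacks_kthread_stub));
}

int main(void)
{
	int bl = 10;       /* batch limit ("blimit" in the kernel) */
	int pending = 100; /* pretend 100 callbacks are ready to invoke */
	int count;

	/* CPU otherwise idle: the limit no longer stops invocation. */
	is_idle_task_stub = true;
	for (count = 0; count < pending && !should_stop(count, bl); )
		count++;
	printf("idle CPU: invoked %d of %d callbacks\n", count, pending);

	/* Another task wants the CPU: stop at the batch limit. */
	is_idle_task_stub = false;
	need_resched_stub = true;
	for (count = 0; count < pending && !should_stop(count, bl); )
		count++;
	printf("busy CPU: invoked %d of %d callbacks\n", count, pending);
	return 0;
}

With the old test (count >= bl alone) both runs would stop after 10 callbacks; under the patched condition the otherwise-idle CPU drains all 100, increasing its chance of entering dyntick-idle promptly.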
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index c0ed3765ec3..4ec4b14cfba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1403,7 +1403,10 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		debug_rcu_head_unqueue(list);
 		__rcu_reclaim(rsp->name, list);
 		list = next;
-		if (++count >= bl)
+		/* Stop only if limit reached and CPU has something to do. */
+		if (++count >= bl &&
+		    (need_resched() ||
+		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
 			break;
 	}
 
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 9bcfbc9d16c..fddff92d667 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -455,6 +455,7 @@ static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
+static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index dbcea6b93ae..adb6e666c6f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1337,6 +1337,15 @@ static void invoke_rcu_callbacks_kthread(void)
 }
 
 /*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+	return __get_cpu_var(rcu_cpu_kthread_task) == current;
+}
+
+/*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
  * kthread.
@@ -1780,6 +1789,11 @@ static void invoke_rcu_callbacks_kthread(void)
 	WARN_ON_ONCE(1);
 }
 
+static bool rcu_is_callbacks_kthread(void)
+{
+	return false;
+}
+
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
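Note that the !CONFIG_RCU_BOOST stub simply returns false: without boosting there is no per-CPU rcu_cpu_kthread_task to compare against, and callbacks are invoked from softirq context rather than from a kthread. In that configuration the new stop condition in rcu_do_batch() therefore reduces to need_resched() || !is_idle_task(current).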