| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com>    | 2015-06-30 11:14:32 -0700 |
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com>    | 2015-07-17 14:59:01 -0700 |
| commit    | cf3620a6c7798be3395163d3bb863ab378a6aa80 (patch) |                           |
| tree      | 439e5d6905d777fdd009c6a794f922a1c3837de7 /kernel |                           |
| parent    | 2cd6ffafec066118365f6d7eb7a42ea16c1f032c (diff)  |                           |
rcu: Add stall warnings to synchronize_sched_expedited()
Although synchronize_sched_expedited() has historically had no RCU CPU stall
warnings of its own, the availability of the rcupdate.rcu_expedited boot
parameter invalidates the old assumption that synchronize_sched()'s stall
warnings would suffice: with that parameter set, synchronize_sched() calls
are serviced by synchronize_sched_expedited(), so a stall there would
otherwise go unreported.  This commit therefore adds RCU CPU stall warnings
to synchronize_sched_expedited().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
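For reference, the stall message is assembled from the pr_err()/pr_cont() calls in the diff below; a report would therefore look roughly like this (the CPU numbers, jiffies count, and sequence number are made-up illustrations, and `rcu_sched` is the name of the RCU-sched flavor's rcu_state):

```
INFO: rcu_sched detected expedited stalls on CPUs: { 2 7 } 5254 jiffies s: 674
```

followed by a dump_cpu_task() stack dump for each CPU listed.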
Diffstat (limited to 'kernel')
| mode       | file              | lines |
|------------|-------------------|-------|
| -rw-r--r-- | kernel/rcu/tree.c | 58    |
| -rw-r--r-- | kernel/rcu/tree.h | 1     |

2 files changed, 55 insertions, 4 deletions
```diff
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e45097fc39fa..4b6594c7db58 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3369,16 +3369,65 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	return rnp1;
 }
 
+/* Invoked on each online non-idle CPU for expedited quiescent state. */
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
-	struct rcu_state *rsp = data;
+	struct rcu_data *rdp = data;
+	struct rcu_state *rsp = rdp->rsp;
 
 	/* We are here: If we are last, do the wakeup. */
+	rdp->exp_done = true;
 	if (atomic_dec_and_test(&rsp->expedited_need_qs))
 		wake_up(&rsp->expedited_wq);
 	return 0;
 }
 
+static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
+{
+	int cpu;
+	unsigned long jiffies_stall;
+	unsigned long jiffies_start;
+	struct rcu_data *rdp;
+	int ret;
+
+	jiffies_stall = rcu_jiffies_till_stall_check();
+	jiffies_start = jiffies;
+
+	for (;;) {
+		ret = wait_event_interruptible_timeout(
+				rsp->expedited_wq,
+				!atomic_read(&rsp->expedited_need_qs),
+				jiffies_stall);
+		if (ret > 0)
+			return;
+		if (ret < 0) {
+			/* Hit a signal, disable CPU stall warnings. */
+			wait_event(rsp->expedited_wq,
+				   !atomic_read(&rsp->expedited_need_qs));
+			return;
+		}
+		pr_err("INFO: %s detected expedited stalls on CPUs: {",
+		       rsp->name);
+		for_each_online_cpu(cpu) {
+			rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			if (rdp->exp_done)
+				continue;
+			pr_cont(" %d", cpu);
+		}
+		pr_cont(" } %lu jiffies s: %lu\n",
+			jiffies - jiffies_start, rsp->expedited_sequence);
+		for_each_online_cpu(cpu) {
+			rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			if (rdp->exp_done)
+				continue;
+			dump_cpu_task(cpu);
+		}
+		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
+	}
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -3428,19 +3477,20 @@ void synchronize_sched_expedited(void)
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
+		rdp->exp_done = false;
+
 		/* Skip our CPU and any idle CPUs. */
 		if (raw_smp_processor_id() == cpu ||
 		    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
 			continue;
 		atomic_inc(&rsp->expedited_need_qs);
 		stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
-				    rsp, &rdp->exp_stop_work);
+				    rdp, &rdp->exp_stop_work);
 	}
 
 	/* Remove extra count and, if necessary, wait for CPUs to stop. */
 	if (!atomic_dec_and_test(&rsp->expedited_need_qs))
-		wait_event(rsp->expedited_wq,
-			   !atomic_read(&rsp->expedited_need_qs));
+		synchronize_sched_expedited_wait(rsp);
 
 	rcu_exp_gp_seq_end(rsp);
 	mutex_unlock(&rnp->exp_funnel_mutex);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index efee84ce1e08..b3ae8d3cffbc 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -370,6 +370,7 @@ struct rcu_data {
 	struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 	struct mutex exp_funnel_mutex;
+	bool exp_done;			/* Expedited QS for this CPU? */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
```
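The core of the change is synchronize_sched_expedited_wait(): instead of blocking unconditionally, the waiter blocks with a timeout; when the timeout fires it names every CPU that has not yet set exp_done, dumps those CPUs' stacks, lengthens the wait interval, and tries again, and a signal downgrades it to a plain uninterruptible wait with stall warnings disabled. Below is a minimal userspace sketch of that wait-report-escalate pattern, with pthreads standing in for kernel wait queues and worker threads standing in for CPUs reporting quiescent states; every identifier in it is invented for illustration and none of it is kernel code.

```c
/* Sketch of the wait-report-escalate pattern, in userspace pthreads. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define NWORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done_cv = PTHREAD_COND_INITIALIZER;
static int need_qs = NWORKERS;	/* plays the role of rsp->expedited_need_qs */
static bool done[NWORKERS];	/* plays the role of rdp->exp_done */

static void *worker(void *arg)
{
	int id = (int)(long)arg;

	usleep((id + 1) * 150 * 1000);	/* simulate work of varying length */
	pthread_mutex_lock(&lock);
	done[id] = true;
	if (--need_qs == 0)		/* last one in does the wakeup */
		pthread_cond_signal(&all_done_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Mirrors synchronize_sched_expedited_wait(): wait, report, escalate. */
static void wait_with_stall_report(void)
{
	const long stall_ms_init = 200;	/* rcu_jiffies_till_stall_check() analog */
	long stall_ms = stall_ms_init;
	struct timespec deadline;

	pthread_mutex_lock(&lock);
	while (need_qs > 0) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += stall_ms / 1000;
		deadline.tv_nsec += (stall_ms % 1000) * 1000000L;
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_sec++;
			deadline.tv_nsec -= 1000000000L;
		}
		if (pthread_cond_timedwait(&all_done_cv, &lock,
					   &deadline) == 0)
			continue;	/* woken (or spurious): recheck need_qs */
		/* Timed out: name the stragglers, then wait longer. */
		printf("INFO: detected stalls on workers: {");
		for (int i = 0; i < NWORKERS; i++)
			if (!done[i])
				printf(" %d", i);
		printf(" }\n");
		/* Same escalation as the patch: 3x the initial interval + 3. */
		stall_ms = 3 * stall_ms_init + 3;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tids[NWORKERS];

	for (long i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);
	wait_with_stall_report();
	for (int i = 0; i < NWORKERS; i++)
		pthread_join(tids[i], NULL);
	printf("all workers done\n");
	return 0;
}
```

The design point mirrored here is that a missed deadline is a diagnostic event, not an error: the waiter keeps waiting either way, but it leaves a trail naming the stragglers, and it backs off to a longer reporting interval so a genuinely slow system is not flooded with duplicate reports.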
