Diffstat (limited to 'kernel')
-rw-r--r--   kernel/context_tracking.c   10
-rw-r--r--   kernel/rcu/tree.c            8
2 files changed, 9 insertions, 9 deletions
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 868ae0bcd4be..5cfdfc03b401 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -28,7 +28,7 @@ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
-	.dynticks_nesting = 1,
+	.nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 #endif
 	.state = ATOMIC_INIT(CT_RCU_WATCHING),
@@ -131,7 +131,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
 		     ct_dynticks_nesting() == 0);
 	if (ct_dynticks_nesting() != 1) {
 		// RCU will still be watching, so just do accounting and leave.
-		ct->dynticks_nesting--;
+		ct->nesting--;
 		return;
 	}
@@ -145,7 +145,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
 	instrument_atomic_write(&ct->state, sizeof(ct->state));
 	instrumentation_end();
-	WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+	WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */
 	// RCU is watching here ...
 	ct_kernel_exit_state(offset);
 	// ... but is no longer watching here.
@@ -170,7 +170,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
 	if (oldval) {
 		// RCU was already watching, so just do accounting and leave.
-		ct->dynticks_nesting++;
+		ct->nesting++;
 		return;
 	}
 	rcu_dynticks_task_exit();
@@ -184,7 +184,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
 	trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_rcu_watching());
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
-	WRITE_ONCE(ct->dynticks_nesting, 1);
+	WRITE_ONCE(ct->nesting, 1);
 	WARN_ON_ONCE(ct_dynticks_nmi_nesting());
 	WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	instrumentation_end();
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 45a9f3667c2a..0d5a539acd58 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -389,7 +389,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	/* Check for counter underflows */
 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
-			 "RCU dynticks_nesting counter underflow!");
+			 "RCU nesting counter underflow!");
 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
@@ -597,7 +597,7 @@ void rcu_irq_exit_check_preempt(void)
 	lockdep_assert_irqs_disabled();
 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
-			 "RCU dynticks_nesting counter underflow/zero!");
+			 "RCU nesting counter underflow/zero!");
 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE,
 			 "Bad RCU dynticks_nmi_nesting counter\n");
@@ -4804,7 +4804,7 @@ rcu_boot_init_percpu_data(int cpu)
 	/* Set up local state, ensuring consistent view of global state. */
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	INIT_WORK(&rdp->strict_work, strict_work_handler);
-	WARN_ON_ONCE(ct->dynticks_nesting != 1);
+	WARN_ON_ONCE(ct->nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(ct_rcu_watching_cpu(cpu)));
 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
@@ -4898,7 +4898,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
 	rdp->blimit = blimit;
-	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
+	ct->nesting = 1;	/* CPU not up, no tearing. */
 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	/*
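For readers skimming the rename: the patch only changes the field name (dynticks_nesting becomes nesting); the entry/exit accounting it flows through is unchanged. The sketch below is a stand-alone toy model of that accounting, not kernel code: the names toy_ct, toy_kernel_exit() and toy_kernel_enter() are invented for illustration, and the rcu_watching flag stands in for the real CT_RCU_WATCHING state.

/*
 * Toy model only (not kernel code): mirrors how the per-CPU "nesting"
 * counter, formerly "dynticks_nesting", is used by ct_kernel_exit()
 * and ct_kernel_enter() in the diff above.
 */
#include <stdio.h>

struct toy_ct {
	int nesting;		/* renamed from dynticks_nesting */
	int rcu_watching;	/* stand-in for the CT_RCU_WATCHING state */
};

/* Leaving the kernel (to idle/user): drop one nesting level. */
static void toy_kernel_exit(struct toy_ct *ct)
{
	if (ct->nesting != 1) {
		/* Still nested: RCU keeps watching, just do accounting. */
		ct->nesting--;
		return;
	}
	/* Last level: counter goes to 0 and RCU stops watching this CPU. */
	ct->nesting = 0;
	ct->rcu_watching = 0;
}

/* Entering the kernel (from idle/user): add one nesting level. */
static void toy_kernel_enter(struct toy_ct *ct)
{
	if (ct->nesting) {
		/* RCU was already watching: accounting only. */
		ct->nesting++;
		return;
	}
	/* First level: RCU starts watching again, counter goes to 1. */
	ct->rcu_watching = 1;
	ct->nesting = 1;
}

int main(void)
{
	/* Boot-time state in the diff: nesting starts at 1. */
	struct toy_ct ct = { .nesting = 1, .rcu_watching = 1 };

	toy_kernel_exit(&ct);	/* 1 -> 0, RCU stops watching */
	printf("after exit:  nesting=%d watching=%d\n", ct.nesting, ct.rcu_watching);

	toy_kernel_enter(&ct);	/* 0 -> 1, RCU watches again */
	printf("after enter: nesting=%d watching=%d\n", ct.nesting, ct.rcu_watching);
	return 0;
}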