diff options
Diffstat (limited to 'kernel/softirq.c')
| -rw-r--r-- | kernel/softirq.c | 72 | 
1 files changed, 22 insertions, 50 deletions
| diff --git a/kernel/softirq.c b/kernel/softirq.c index 36e061740047..c506f266a6b9 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -131,23 +131,17 @@ void _local_bh_enable(void)  EXPORT_SYMBOL(_local_bh_enable); -void local_bh_enable(void) +static inline void _local_bh_enable_ip(unsigned long ip)  { +	WARN_ON_ONCE(in_irq() || irqs_disabled());  #ifdef CONFIG_TRACE_IRQFLAGS -	unsigned long flags; - -	WARN_ON_ONCE(in_irq()); -#endif -	WARN_ON_ONCE(irqs_disabled()); - -#ifdef CONFIG_TRACE_IRQFLAGS -	local_irq_save(flags); +	local_irq_disable();  #endif  	/*  	 * Are softirqs going to be turned on now:  	 */  	if (softirq_count() == SOFTIRQ_OFFSET) -		trace_softirqs_on((unsigned long)__builtin_return_address(0)); +		trace_softirqs_on(ip);  	/*  	 * Keep preemption disabled until we are done with  	 * softirq processing: @@ -159,40 +153,20 @@ void local_bh_enable(void)  	dec_preempt_count();  #ifdef CONFIG_TRACE_IRQFLAGS -	local_irq_restore(flags); +	local_irq_enable();  #endif  	preempt_check_resched();  } + +void local_bh_enable(void) +{ +	_local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +}  EXPORT_SYMBOL(local_bh_enable);  void local_bh_enable_ip(unsigned long ip)  { -#ifdef CONFIG_TRACE_IRQFLAGS -	unsigned long flags; - -	WARN_ON_ONCE(in_irq()); - -	local_irq_save(flags); -#endif -	/* -	 * Are softirqs going to be turned on now: -	 */ -	if (softirq_count() == SOFTIRQ_OFFSET) -		trace_softirqs_on(ip); -	/* -	 * Keep preemption disabled until we are done with -	 * softirq processing: - 	 */ - 	sub_preempt_count(SOFTIRQ_OFFSET - 1); - -	if (unlikely(!in_interrupt() && local_softirq_pending())) -		do_softirq(); - -	dec_preempt_count(); -#ifdef CONFIG_TRACE_IRQFLAGS -	local_irq_restore(flags); -#endif -	preempt_check_resched(); +	_local_bh_enable_ip(ip);  }  EXPORT_SYMBOL(local_bh_enable_ip); @@ -312,7 +286,7 @@ void irq_exit(void)  #ifdef CONFIG_NO_HZ  	/* Make sure that timer wheel updates are propagated */  	if (!in_interrupt() && 
idle_cpu(smp_processor_id()) && !need_resched()) -	tick_nohz_stop_sched_tick(); +	tick_nohz_stop_sched_tick(0);  	rcu_irq_exit();  #endif  	preempt_enable_no_resched(); @@ -347,9 +321,8 @@ void raise_softirq(unsigned int nr)  	local_irq_restore(flags);  } -void open_softirq(int nr, void (*action)(struct softirq_action*), void *data) +void open_softirq(int nr, void (*action)(struct softirq_action *))  { -	softirq_vec[nr].data = data;  	softirq_vec[nr].action = action;  } @@ -360,10 +333,8 @@ struct tasklet_head  	struct tasklet_struct **tail;  }; -/* Some compilers disobey section attribute on statics when not -   initialized -- RR */ -static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL }; -static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL }; +static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); +static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);  void __tasklet_schedule(struct tasklet_struct *t)  { @@ -503,8 +474,8 @@ void __init softirq_init(void)  			&per_cpu(tasklet_hi_vec, cpu).head;  	} -	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL); -	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL); +	open_softirq(TASKLET_SOFTIRQ, tasklet_action); +	open_softirq(HI_SOFTIRQ, tasklet_hi_action);  }  static int ksoftirqd(void * __bind_cpu) @@ -645,7 +616,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,  		p = per_cpu(ksoftirqd, hotcpu);  		per_cpu(ksoftirqd, hotcpu) = NULL; -		sched_setscheduler(p, SCHED_FIFO, &param); +		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);  		kthread_stop(p);  		takeover_tasklets(hotcpu);  		break; @@ -659,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {  	.notifier_call = cpu_callback  }; -__init int spawn_ksoftirqd(void) +static __init int spawn_ksoftirqd(void)  {  	void *cpu = (void *)(long)smp_processor_id();  	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); @@ -669,17 +640,18 @@ __init int spawn_ksoftirqd(void)  	register_cpu_notifier(&cpu_nfb);  	
return 0;  } +early_initcall(spawn_ksoftirqd);  #ifdef CONFIG_SMP  /*   * Call a function on all processors   */ -int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait) +int on_each_cpu(void (*func) (void *info), void *info, int wait)  {  	int ret = 0;  	preempt_disable(); -	ret = smp_call_function(func, info, retry, wait); +	ret = smp_call_function(func, info, wait);  	local_irq_disable();  	func(info);  	local_irq_enable(); | 
