diff options
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.c	10
-rw-r--r--	kernel/trace/trace_sched_switch.c	10
2 files changed, 4 insertions, 16 deletions
| diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1cd2e8143bb4..caa4051ce778 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)  {  	struct trace_array *tr = &global_trace;  	struct trace_array_cpu *data; -	long disabled;  	int cpu;  	int pc; @@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)  	preempt_disable_notrace();  	cpu = raw_smp_processor_id();  	data = tr->data[cpu]; -	disabled = atomic_inc_return(&data->disabled); -	if (likely(disabled == 1)) +	if (likely(!atomic_read(&data->disabled)))  		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); -	atomic_dec(&data->disabled);  	preempt_enable_notrace();  } @@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)  	struct trace_array_cpu *data;  	struct print_entry *entry;  	unsigned long flags, irq_flags; -	long disabled;  	int cpu, len = 0, size, pc;  	if (!tr->ctrl || tracing_disabled) @@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)  	preempt_disable_notrace();  	cpu = raw_smp_processor_id();  	data = tr->data[cpu]; -	disabled = atomic_inc_return(&data->disabled); -	if (unlikely(disabled != 1)) +	if (unlikely(atomic_read(&data->disabled)))  		goto out;  	spin_lock_irqsave(&trace_buf_lock, flags); @@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)  	spin_unlock_irqrestore(&trace_buf_lock, flags);   out: -	atomic_dec(&data->disabled);  	preempt_enable_notrace();  	return len; diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index c7fa08a5b7f4..b8f56beb1a62 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,  {  	struct trace_array_cpu *data;  	unsigned long flags; -	long disabled;  	int 
cpu;  	int pc; @@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,  	local_irq_save(flags);  	cpu = raw_smp_processor_id();  	data = ctx_trace->data[cpu]; -	disabled = atomic_inc_return(&data->disabled); -	if (likely(disabled == 1)) +	if (likely(!atomic_read(&data->disabled)))  		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); -	atomic_dec(&data->disabled);  	local_irq_restore(flags);  } @@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)  {  	struct trace_array_cpu *data;  	unsigned long flags; -	long disabled;  	int cpu, pc;  	if (!likely(tracer_enabled)) @@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)  	local_irq_save(flags);  	cpu = raw_smp_processor_id();  	data = ctx_trace->data[cpu]; -	disabled = atomic_inc_return(&data->disabled); -	if (likely(disabled == 1)) +	if (likely(!atomic_read(&data->disabled)))  		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,  					   flags, pc); -	atomic_dec(&data->disabled);  	local_irq_restore(flags);  } | 
