-rw-r--r--  block/blktrace.c                  21
-rw-r--r--  kernel/trace/kmemtrace.c          19
-rw-r--r--  kernel/trace/trace.c              94
-rw-r--r--  kernel/trace/trace.h              11
-rw-r--r--  kernel/trace/trace_boot.c         20
-rw-r--r--  kernel/trace/trace_branch.c        7
-rw-r--r--  kernel/trace/trace_hw_branches.c   7
-rw-r--r--  kernel/trace/trace_mmiotrace.c    20
-rw-r--r--  kernel/trace/trace_power.c        20

9 files changed, 102 insertions(+), 117 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 8e52f24cc8f9..834cd84037b2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -187,19 +187,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	cpu = raw_smp_processor_id();

 	if (blk_tr) {
-		struct trace_entry *ent;
 		tracing_record_cmdline(current);

-		event = ring_buffer_lock_reserve(blk_tr->buffer,
-						 sizeof(*t) + pdu_len);
+		pc = preempt_count();
+		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+						  sizeof(*t) + pdu_len,
+						  0, pc);
 		if (!event)
 			return;
-
-		ent = ring_buffer_event_data(event);
-		t = (struct blk_io_trace *)ent;
-		pc = preempt_count();
-		tracing_generic_entry_update(ent, 0, pc);
-		ent->type = TRACE_BLK;
+		t = ring_buffer_event_data(event);
 		goto record_it;
 	}

@@ -241,12 +237,7 @@ record_it:
 			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

 		if (blk_tr) {
-			ring_buffer_unlock_commit(blk_tr->buffer, event);
-			if (pid != 0 &&
-			    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
-			    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-				__trace_stack(blk_tr, 0, 5, pc);
-			trace_wake_up();
+			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
 			return;
 		}
 	}
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 256749d1032a..ae201b3eda89 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -276,13 +276,12 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	if (!kmem_tracing_enabled)
 		return;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		return;

 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_KMEM_ALLOC;
 	entry->call_site = call_site;
 	entry->ptr = ptr;
 	entry->bytes_req = bytes_req;
@@ -290,9 +289,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	entry->gfp_flags = gfp_flags;
 	entry->node	=	node;

-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 }
 EXPORT_SYMBOL(kmemtrace_mark_alloc_node);

@@ -307,20 +304,16 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 	if (!kmem_tracing_enabled)
 		return;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		return;

 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-
-	entry->ent.type = TRACE_KMEM_FREE;
 	entry->type_id	= type_id;
 	entry->call_site = call_site;
 	entry->ptr = ptr;

-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, 0);
 }
 EXPORT_SYMBOL(kmemtrace_mark_free);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index eb453a238a6f..8fad3776e843 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -776,6 +776,39 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }

+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, len);
+	if (event != NULL) {
+		struct trace_entry *ent = ring_buffer_event_data(event);
+
+		tracing_generic_entry_update(ent, flags, pc);
+		ent->type = type;
+	}
+
+	return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+			       unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   unsigned long flags, int pc);
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
+{
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+	trace_wake_up();
+}
+
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -788,12 +821,11 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+					  flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_FN;
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 	ring_buffer_unlock_commit(tr->buffer, event);
@@ -811,12 +843,11 @@ static void __trace_graph_entry(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;

-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_GRAPH_ENT;
 	entry->graph_ent			= *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
@@ -832,12 +863,11 @@ static void __trace_graph_return(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;

-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_GRAPH_RET;
 	entry->ret				= *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
@@ -861,13 +891,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct stack_entry *entry;
 	struct stack_trace trace;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type		= TRACE_STACK;
-
 	memset(&entry->caller, 0, sizeof(entry->caller));

 	trace.nr_entries	= 0;
@@ -908,12 +936,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type		= TRACE_USER_STACK;

 	memset(&entry->caller, 0, sizeof(entry->caller));

@@ -941,20 +968,15 @@ ftrace_trace_special(void *__tr,
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, pc);
-	entry->ent.type			= TRACE_SPECIAL;
 	entry->arg1			= arg1;
 	entry->arg2			= arg2;
 	entry->arg3			= arg3;
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, 0, 4, pc);
-	ftrace_trace_userstack(tr, 0, pc);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }

 void
@@ -973,12 +995,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_CTX;
 	entry->prev_pid			= prev->pid;
 	entry->prev_prio		= prev->prio;
 	entry->prev_state		= prev->state;
@@ -986,9 +1007,7 @@
 	entry->next_prio		= next->prio;
 	entry->next_state		= next->state;
 	entry->next_cpu	= task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 5, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	trace_buffer_unlock_commit(tr, event, flags, pc);
 }

 void
@@ -1000,12 +1019,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_WAKE;
 	entry->prev_pid			= curr->pid;
 	entry->prev_prio		= curr->prio;
 	entry->prev_state		= curr->state;
@@ -1013,11 +1031,7 @@
 	entry->next_prio		= wakee->prio;
 	entry->next_state		= wakee->state;
 	entry->next_cpu			= task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, flags, pc);
 }

 void
@@ -2825,12 +2839,10 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	trace_buf[len] = 0;

 	size = sizeof(*entry) + len + 1;
-	event = ring_buffer_lock_reserve(tr->buffer, size);
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
-	entry->ent.type			= TRACE_PRINT;
 	entry->ip			= ip;
 	entry->depth			= depth;

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index df627a948694..e03f157c772e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -403,6 +403,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags,
+						    int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 						struct trace_array_cpu *data);

diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 4e08debf662d..7a30fc4c3642 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -143,17 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_BOOT_CALL;
 	entry->boot_call = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
@@ -170,17 +166,13 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_BOOT_RET;
 	entry->boot_ret = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 770e52acfc10..48b2196abe37 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -52,14 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	pc = preempt_count();
+	event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		goto out;

-	pc = preempt_count();
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type		= TRACE_BRANCH;

 	/* Strip off the path, only save the file */
 	p = f->file + strlen(f->file);
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index e720c001db2b..2aa1c9f4c7d8 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -189,16 +189,15 @@ void trace_hw_branch(u64 from, u64 to)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, from);
-	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->ent.cpu = cpu;
 	entry->from = from;
 	entry->to   = to;
-	ring_buffer_unlock_commit(tr->buffer, event);
+	trace_buffer_unlock_commit(tr, event, 0, 0);

  out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 104ddebc11d1..c401b908e805 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,19 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
+	int pc = preempt_count();

-	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
 	}
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
-	entry->ent.type			= TRACE_MMIO_RW;
 	entry->rw			= *rw;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }

 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -335,19 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
+	int pc = preempt_count();

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
 	}
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
-	entry->ent.type			= TRACE_MMIO_MAP;
 	entry->map			= *map;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
+	trace_buffer_unlock_commit(tr, event, 0, pc);
 }

 void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 3b1a292d12d2..bfc21f8079ab 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -124,17 +124,13 @@ void trace_power_end(struct power_trace *it)
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
@@ -159,17 +155,13 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];

-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
+	trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
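
The conversion above follows one pattern at every call site: reserve an event and let the helper stamp the common header, fill in only the tracer-specific fields, then commit through the helper, which also takes care of the optional stack/userstack recording and the reader wake-up. Below is a minimal sketch of what a call site looks like after this patch; TRACE_FOO and struct foo_entry are hypothetical stand-ins for illustration, while trace_buffer_lock_reserve(), trace_buffer_unlock_commit(), ring_buffer_event_data() and preempt_count() are the interfaces used in the diff.

/* Hypothetical example entry; real tracers define theirs in trace.h. */
struct foo_entry {
	struct trace_entry	ent;	/* common header filled by the helper */
	unsigned long		val;	/* tracer-specific payload */
};

static void trace_foo(struct trace_array *tr, unsigned long val)
{
	struct ring_buffer_event *event;
	struct foo_entry *entry;
	unsigned long flags = 0;
	int pc = preempt_count();

	/*
	 * Reserve ring buffer space; the helper also fills the common
	 * struct trace_entry header (type, pid, irq flags, preempt count).
	 */
	event = trace_buffer_lock_reserve(tr, TRACE_FOO,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;

	/* Only the tracer-specific payload is left to fill in. */
	entry = ring_buffer_event_data(event);
	entry->val = val;

	/*
	 * Commit the event; the helper also records the kernel and user
	 * stack traces when those options are enabled and wakes up
	 * readers, replacing the open-coded commit/stack/wake sequences.
	 */
	trace_buffer_unlock_commit(tr, event, flags, pc);
}

Tracers that do not carry interrupt flags or preempt-count state simply pass 0 for both, as the kmemtrace, boot, hw-branch and power conversions above do.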
