Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--	kernel/trace/trace.h	116
1 file changed, 100 insertions, 16 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 749a182dab48..bd3e3069300e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -397,6 +397,9 @@ struct trace_array {
 	struct ftrace_ops	*ops;
 	struct trace_pid_list	__rcu *function_pids;
 	struct trace_pid_list	__rcu *function_no_pids;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	struct fgraph_ops	*gops;
+#endif
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/* All of these are protected by the ftrace_lock */
 	struct list_head	func_probes;
@@ -679,9 +682,8 @@ void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 
-void trace_graph_return(struct ftrace_graph_ret *trace);
-int trace_graph_entry(struct ftrace_graph_ent *trace);
-void set_graph_array(struct trace_array *tr);
+void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
+int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -892,12 +894,59 @@ extern int __trace_graph_entry(struct trace_array *tr,
 extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
 				 unsigned int trace_ctx);
+extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
+extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
+extern void free_fgraph_ops(struct trace_array *tr);
+
+enum {
+	TRACE_GRAPH_FL		= 1,
+
+	/*
+	 * In the very unlikely case that an interrupt came in
+	 * at a start of graph tracing, and we want to trace
+	 * the function in that interrupt, the depth can be greater
+	 * than zero, because of the preempted start of a previous
+	 * trace. In an even more unlikely case, depth could be 2
+	 * if a softirq interrupted the start of graph tracing,
+	 * followed by an interrupt preempting a start of graph
+	 * tracing in the softirq, and depth can even be 3
+	 * if an NMI came in at the start of an interrupt function
+	 * that preempted a softirq start of a function that
+	 * preempted normal context!!!! Luckily, it can't be
+	 * greater than 3, so the next two bits are a mask
+	 * of what the depth is when we set TRACE_GRAPH_FL
+	 */
+
+	TRACE_GRAPH_DEPTH_START_BIT,
+	TRACE_GRAPH_DEPTH_END_BIT,
+
+	/*
+	 * To implement set_graph_notrace, if this bit is set, we ignore
+	 * function graph tracing of called functions, until the return
+	 * function is called to clear it.
+	 */
+	TRACE_GRAPH_NOTRACE_BIT,
+};
+
+#define TRACE_GRAPH_NOTRACE		(1 << TRACE_GRAPH_NOTRACE_BIT)
+
+static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
+{
+	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
+}
+
+static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
+{
+	*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
+	*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
+}
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
 
-static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
+static inline int
+ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
 {
 	unsigned long addr = trace->func;
 	int ret = 0;
@@ -919,13 +968,12 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 	}
 
 	if (ftrace_lookup_ip(hash, addr)) {
-
 		/*
 		 * This needs to be cleared on the return functions
		 * when the depth is zero.
 		 */
-		trace_recursion_set(TRACE_GRAPH_BIT);
-		trace_recursion_set_depth(trace->depth);
+		*task_var |= TRACE_GRAPH_FL;
+		ftrace_graph_set_depth(task_var, trace->depth);
 
 		/*
 		 * If no irqs are to be traced, but a set_graph_function
@@ -944,11 +992,14 @@ out:
 	return ret;
 }
 
-static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+static inline void
+ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
 {
-	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
-	    trace->depth == trace_recursion_depth())
-		trace_recursion_clear(TRACE_GRAPH_BIT);
+	unsigned long *task_var = fgraph_get_task_var(gops);
+
+	if ((*task_var & TRACE_GRAPH_FL) &&
+	    trace->depth == ftrace_graph_depth(task_var))
+		*task_var &= ~TRACE_GRAPH_FL;
 }
 
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
@@ -974,7 +1025,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 	return ret;
 }
 #else
-static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
+static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
 {
 	return 1;
 }
@@ -983,27 +1034,37 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
 	return 0;
 }
-static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
 { }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 extern unsigned int fgraph_max_depth;
 
-static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
+static inline bool
+ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
 {
+	unsigned long *task_var = fgraph_get_task_var(gops);
+
 	/* trace it when it is-nested-in or is a function enabled. */
-	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
-		 ftrace_graph_addr(trace)) ||
+	return !((*task_var & TRACE_GRAPH_FL) ||
+		 ftrace_graph_addr(task_var, trace)) ||
 		(trace->depth < 0) ||
 		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
 }
 
+void fgraph_init_ops(struct ftrace_ops *dst_ops,
+		     struct ftrace_ops *src_ops);
+
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
+static inline void free_fgraph_ops(struct trace_array *tr) { }
+/* ftrace_ops may not be defined */
+#define init_array_fgraph_ops(tr, ops) do { } while (0)
+#define allocate_fgraph_ops(tr, ops) ({ 0; })
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 extern struct list_head ftrace_pids;
@@ -1573,6 +1634,29 @@ static inline void *event_file_data(struct file *filp)
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
 
+/*
+ * When the trace_event_file is the filp->i_private pointer,
+ * it must be taken under the event_mutex lock, and then checked
+ * if the EVENT_FILE_FL_FREED flag is set. If it is, then the
+ * data pointed to by the trace_event_file can not be trusted.
+ *
+ * Use the event_file_file() to access the trace_event_file from
+ * the filp the first time under the event_mutex and check for
+ * NULL. If it is needed to be retrieved again and the event_mutex
+ * is still held, then the event_file_data() can be used and it
+ * is guaranteed to be valid.
+ */
+static inline struct trace_event_file *event_file_file(struct file *filp)
+{
+	struct trace_event_file *file;
+
+	lockdep_assert_held(&event_mutex);
+	file = READ_ONCE(file_inode(filp)->i_private);
+	if (!file || file->flags & EVENT_FILE_FL_FREED)
+		return NULL;
+	return file;
+}
+
 extern const struct file_operations event_trigger_fops;
 extern const struct file_operations event_hist_fops;
 extern const struct file_operations event_hist_debug_fops;
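Note on the depth bits added above: TRACE_GRAPH_FL (value 1) is used directly as a mask, while TRACE_GRAPH_DEPTH_START_BIT (value 2) and TRACE_GRAPH_NOTRACE_BIT (value 4) are bit positions, so the saved depth lives in bits 2-3 of the per-task word. The following stand-alone user-space sketch, with the constants re-derived from the enum in the patch (not kernel code), shows how ftrace_graph_depth() and ftrace_graph_set_depth() pack that state:

#include <stdio.h>

/* Values implied by the enum in the patch: FL is a mask, the others are bit positions. */
#define TRACE_GRAPH_FL			1	/* bit 0, used directly as a mask */
#define TRACE_GRAPH_DEPTH_START_BIT	2	/* depth stored in bits 2-3 */
#define TRACE_GRAPH_NOTRACE_BIT		4
#define TRACE_GRAPH_NOTRACE		(1 << TRACE_GRAPH_NOTRACE_BIT)

/* Mirrors ftrace_graph_set_depth() from the patch. */
static void set_depth(unsigned long *task_var, int depth)
{
	*task_var &= ~(3UL << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= (unsigned long)(depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}

/* Mirrors ftrace_graph_depth() from the patch. */
static unsigned long get_depth(unsigned long *task_var)
{
	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

int main(void)
{
	unsigned long task_var = 0;

	/* What ftrace_graph_addr() does on a match: set the flag, remember the depth. */
	task_var |= TRACE_GRAPH_FL;
	set_depth(&task_var, 3);	/* worst case: NMI -> irq -> softirq -> task */

	printf("word=0x%lx depth=%lu\n", task_var, get_depth(&task_var));
	/* prints: word=0xd depth=3 */

	/* What ftrace_graph_addr_finish() does once the depths match again. */
	task_var &= ~TRACE_GRAPH_FL;
	return 0;
}

Two bits can only record a depth of 0-3, which is why the comment argues that the nesting at the start of a trace cannot go deeper than normal context -> softirq -> irq -> NMI.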

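The comment added in the last hunk describes a locking protocol but no call site appears in this header. A minimal sketch of how a ->read() handler could follow it (the handler name, the flags formatting, and the -ENODEV return are illustrative assumptions, not part of the patch):

/*
 * Hypothetical ->read() handler for an event file; illustrative only.
 * Assumes it lives in kernel/trace, with trace.h already included.
 */
static ssize_t example_event_flags_read(struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags = 0;
	char buf[32];
	int len;

	mutex_lock(&event_mutex);
	/* First lookup from filp must go through event_file_file(). */
	file = event_file_file(filp);
	if (file)
		flags = file->flags;	/* safe: not freed while event_mutex is held */
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;		/* the event behind this file was freed */

	len = snprintf(buf, sizeof(buf), "%#lx\n", flags);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

Per the comment, once the first lookup through event_file_file() has succeeded and event_mutex is still held, later lookups in the same critical section may use event_file_data() directly.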