diff options
Diffstat (limited to 'kernel/lockdep.c')
| -rw-r--r-- | kernel/lockdep.c | 80 | 
1 file changed, 69 insertions(+), 11 deletions(-)
| diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 81a4e4a3f087..d38a64362973 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -39,6 +39,7 @@  #include <linux/irqflags.h>  #include <linux/utsname.h>  #include <linux/hash.h> +#include <linux/ftrace.h>  #include <asm/sections.h> @@ -81,6 +82,8 @@ static int graph_lock(void)  		__raw_spin_unlock(&lockdep_lock);  		return 0;  	} +	/* prevent any recursions within lockdep from causing deadlocks */ +	current->lockdep_recursion++;  	return 1;  } @@ -89,6 +92,7 @@ static inline int graph_unlock(void)  	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))  		return DEBUG_LOCKS_WARN_ON(1); +	current->lockdep_recursion--;  	__raw_spin_unlock(&lockdep_lock);  	return 0;  } @@ -982,7 +986,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)  	return 1;  } -#ifdef CONFIG_TRACE_IRQFLAGS +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)  /*   * Forwards and backwards subgraph searching, for the purposes of   * proving that two subgraphs can be connected by a new dependency @@ -1458,7 +1462,14 @@ out_bug:  }  unsigned long nr_lock_chains; -static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; +struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; +int nr_chain_hlocks; +static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; + +struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) +{ +	return lock_classes + chain_hlocks[chain->base + i]; +}  /*   * Look up a dependency chain. If the key is not present yet then @@ -1466,10 +1477,15 @@ static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];   * validated. If the key is already hashed, return 0.   * (On return with 1 graph_lock is held.)   
*/ -static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class) +static inline int lookup_chain_cache(struct task_struct *curr, +				     struct held_lock *hlock, +				     u64 chain_key)  { +	struct lock_class *class = hlock->class;  	struct list_head *hash_head = chainhashentry(chain_key);  	struct lock_chain *chain; +	struct held_lock *hlock_curr, *hlock_next; +	int i, j, n, cn;  	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))  		return 0; @@ -1517,6 +1533,32 @@ cache_hit:  	}  	chain = lock_chains + nr_lock_chains++;  	chain->chain_key = chain_key; +	chain->irq_context = hlock->irq_context; +	/* Find the first held_lock of current chain */ +	hlock_next = hlock; +	for (i = curr->lockdep_depth - 1; i >= 0; i--) { +		hlock_curr = curr->held_locks + i; +		if (hlock_curr->irq_context != hlock_next->irq_context) +			break; +		hlock_next = hlock; +	} +	i++; +	chain->depth = curr->lockdep_depth + 1 - i; +	cn = nr_chain_hlocks; +	while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) { +		n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth); +		if (n == cn) +			break; +		cn = n; +	} +	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { +		chain->base = cn; +		for (j = 0; j < chain->depth - 1; j++, i++) { +			int lock_id = curr->held_locks[i].class - lock_classes; +			chain_hlocks[chain->base + j] = lock_id; +		} +		chain_hlocks[chain->base + j] = class - lock_classes; +	}  	list_add_tail_rcu(&chain->entry, hash_head);  	debug_atomic_inc(&chain_lookup_misses);  	inc_chains(); @@ -1538,7 +1580,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,  	 * graph_lock for us)  	 */  	if (!hlock->trylock && (hlock->check == 2) && -			lookup_chain_cache(chain_key, hlock->class)) { +	    lookup_chain_cache(curr, hlock, chain_key)) {  		/*  		 * Check whether last held lock:  		 * @@ -1680,7 +1722,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,  static int mark_lock(struct task_struct *curr, struct held_lock 
*this,  		     enum lock_usage_bit new_bit); -#ifdef CONFIG_TRACE_IRQFLAGS +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)  /*   * print irq inversion bug: @@ -2013,11 +2055,13 @@ void early_boot_irqs_on(void)  /*   * Hardirqs will be enabled:   */ -void trace_hardirqs_on(void) +void trace_hardirqs_on_caller(unsigned long a0)  {  	struct task_struct *curr = current;  	unsigned long ip; +	time_hardirqs_on(CALLER_ADDR0, a0); +  	if (unlikely(!debug_locks || current->lockdep_recursion))  		return; @@ -2055,16 +2099,23 @@ void trace_hardirqs_on(void)  	curr->hardirq_enable_event = ++curr->irq_events;  	debug_atomic_inc(&hardirqs_on_events);  } +EXPORT_SYMBOL(trace_hardirqs_on_caller); +void trace_hardirqs_on(void) +{ +	trace_hardirqs_on_caller(CALLER_ADDR0); +}  EXPORT_SYMBOL(trace_hardirqs_on);  /*   * Hardirqs were disabled:   */ -void trace_hardirqs_off(void) +void trace_hardirqs_off_caller(unsigned long a0)  {  	struct task_struct *curr = current; +	time_hardirqs_off(CALLER_ADDR0, a0); +  	if (unlikely(!debug_locks || current->lockdep_recursion))  		return; @@ -2082,7 +2133,12 @@ void trace_hardirqs_off(void)  	} else  		debug_atomic_inc(&redundant_hardirqs_off);  } +EXPORT_SYMBOL(trace_hardirqs_off_caller); +void trace_hardirqs_off(void) +{ +	trace_hardirqs_off_caller(CALLER_ADDR0); +}  EXPORT_SYMBOL(trace_hardirqs_off);  /* @@ -2246,7 +2302,7 @@ static inline int separate_irq_context(struct task_struct *curr,   * Mark a lock with a usage bit, and validate the state transition:   */  static int mark_lock(struct task_struct *curr, struct held_lock *this, -		     enum lock_usage_bit new_bit) +			     enum lock_usage_bit new_bit)  {  	unsigned int new_mask = 1 << new_bit, ret = 1; @@ -2650,7 +2706,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)   */  static void check_flags(unsigned long flags)  { -#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS) +#if defined(CONFIG_PROVE_LOCKING) && 
defined(CONFIG_DEBUG_LOCKDEP) && \ +    defined(CONFIG_TRACE_IRQFLAGS)  	if (!debug_locks)  		return; @@ -2686,7 +2743,7 @@ static void check_flags(unsigned long flags)   * and also avoid lockdep recursion:   */  void lock_acquire(struct lockdep_map *lock, unsigned int subclass, -		  int trylock, int read, int check, unsigned long ip) +			  int trylock, int read, int check, unsigned long ip)  {  	unsigned long flags; @@ -2708,7 +2765,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,  EXPORT_SYMBOL_GPL(lock_acquire); -void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) +void lock_release(struct lockdep_map *lock, int nested, +			  unsigned long ip)  {  	unsigned long flags; | 
