Diffstat (limited to 'kernel/futex.c')
-rw-r--r--  kernel/futex.c  90
1 file changed, 67 insertions, 23 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..67dacaf93e56 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -157,7 +157,9 @@
  * enqueue.
  */
 
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 int __read_mostly futex_cmpxchg_enabled;
+#endif
 
 /*
  * Futex flags used to encode options to functions and preserve them across
@@ -234,6 +236,7 @@ static const struct futex_q futex_q_init = {
  * waiting on a futex.
  */
 struct futex_hash_bucket {
+	atomic_t waiters;
 	spinlock_t lock;
 	struct plist_head chain;
 } ____cacheline_aligned_in_smp;
@@ -253,22 +256,37 @@ static inline void futex_get_mm(union futex_key *key)
 	smp_mb__after_atomic_inc();
 }
 
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+	atomic_inc(&hb->waiters);
 	/*
-	 * Tasks trying to enter the critical region are most likely
-	 * potential waiters that will be added to the plist. Ensure
-	 * that wakers won't miss to-be-slept tasks in the window between
-	 * the wait call and the actual plist_add.
+	 * Full barrier (A), see the ordering comment above.
 	 */
-	if (spin_is_locked(&hb->lock))
-		return true;
-	smp_rmb(); /* Make sure we check the lock state first */
+	smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	atomic_dec(&hb->waiters);
+#endif
+}
 
-	return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	return atomic_read(&hb->waiters);
 #else
-	return true;
+	return 1;
 #endif
 }
 
@@ -954,6 +972,7 @@ static void __unqueue_futex(struct futex_q *q)
 
 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
 	plist_del(&q->list, &hb->chain);
+	hb_waiters_dec(hb);
 }
 
 /*
@@ -1257,7 +1276,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 	 */
 	if (likely(&hb1->chain != &hb2->chain)) {
 		plist_del(&q->list, &hb1->chain);
+		hb_waiters_dec(hb1);
 		plist_add(&q->list, &hb2->chain);
+		hb_waiters_inc(hb2);
 		q->lock_ptr = &hb2->lock;
 	}
 	get_futex_key_refs(key2);
@@ -1600,6 +1621,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	struct futex_hash_bucket *hb;
 
 	hb = hash_futex(&q->key);
+
+	/*
+	 * Increment the counter before taking the lock so that
+	 * a potential waker won't miss a to-be-slept task that is
+	 * waiting for the spinlock. This is safe as all queue_lock()
+	 * users end up calling queue_me(). Similarly, for housekeeping,
+	 * decrement the counter at queue_unlock() when some error has
+	 * occurred and we don't end up adding the task to the list.
+	 */
+	hb_waiters_inc(hb);
+
 	q->lock_ptr = &hb->lock;
 
 	spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1643,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
+	hb_waiters_dec(hb);
 }
 
 /**
@@ -2342,6 +2375,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		 * Unqueue the futex_q and determine which it was.
 		 */
 		plist_del(&q->list, &hb->chain);
+		hb_waiters_dec(hb);
 
 		/* Handle spurious wakeups gracefully */
 		ret = -EWOULDBLOCK;
@@ -2843,9 +2877,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 
-static int __init futex_init(void)
+static void __init futex_detect_cmpxchg(void)
 {
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 	u32 curval;
+
+	/*
+	 * This will fail and we want it. Some arch implementations do
+	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
+	 * functionality. We want to know that before we call in any
+	 * of the complex code paths. Also we want to prevent
+	 * registration of robust lists in that case. NULL is
+	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
+	 * -ENOSYS.
+	 */
+	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+		futex_cmpxchg_enabled = 1;
+#endif
+}
+
+static int __init futex_init(void)
+{
 	unsigned int futex_shift;
 	unsigned long i;
 
@@ -2861,20 +2914,11 @@ static int __init futex_init(void)
 					       &futex_shift, NULL,
 					       futex_hashsize, futex_hashsize);
 	futex_hashsize = 1UL << futex_shift;
-	/*
-	 * This will fail and we want it. Some arch implementations do
-	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
-	 * functionality. We want to know that before we call in any
-	 * of the complex code paths. Also we want to prevent
-	 * registration of robust lists in that case. NULL is
-	 * guaranteed to fault and we get -EFAULT on functional
-	 * implementation, the non-functional ones will return
-	 * -ENOSYS.
-	 */
-	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
-		futex_cmpxchg_enabled = 1;
+
+	futex_detect_cmpxchg();
 
 	for (i = 0; i < futex_hashsize; i++) {
+		atomic_set(&futex_queues[i].waiters, 0);
 		plist_head_init(&futex_queues[i].chain);
 		spin_lock_init(&futex_queues[i].lock);
 	}
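
The per-bucket atomic waiters count added above lets wake-side code ask hb_waiters_pending() and skip taking hb->lock entirely when nothing can be queued; the futex_wake() caller of that helper is not part of the hunks shown here. Below is a minimal userspace model of the pattern, an illustration only: the names (struct bucket, wait_on, wake_all) are invented for this sketch, C11 atomics stand in for the kernel's atomic_t plus explicit barriers, and the futex-word re-check that the real futex_wait() performs after queueing is omitted.

/*
 * Userspace model of the waiter-counting scheme: a waiter advertises
 * itself in an atomic counter *before* contending on the bucket lock,
 * so a waker that reads zero can skip the lock.  Not kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct bucket {
	atomic_int waiters;		/* like futex_hash_bucket::waiters */
	pthread_mutex_t lock;		/* like hb->lock */
	pthread_cond_t cond;		/* stands in for the plist + schedule() */
	bool woken;
};

static void wait_on(struct bucket *b)
{
	/* hb_waiters_inc(): register before taking the lock */
	atomic_fetch_add(&b->waiters, 1);

	pthread_mutex_lock(&b->lock);
	while (!b->woken)
		pthread_cond_wait(&b->cond, &b->lock);
	pthread_mutex_unlock(&b->lock);

	/* hb_waiters_dec(): we are off the "queue" again */
	atomic_fetch_sub(&b->waiters, 1);
}

static void wake_all(struct bucket *b)
{
	/* hb_waiters_pending(): zero means nobody registered, skip the lock */
	if (atomic_load(&b->waiters) == 0)
		return;

	pthread_mutex_lock(&b->lock);
	b->woken = true;
	pthread_cond_broadcast(&b->cond);
	pthread_mutex_unlock(&b->lock);
}

static void *waiter(void *arg)
{
	wait_on(arg);
	return NULL;
}

int main(void)
{
	struct bucket b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	wake_all(&b);		/* fast path: no waiter yet, lock never taken */

	pthread_create(&t, NULL, waiter, &b);
	while (atomic_load(&b.waiters) == 0)
		;		/* crude wait until the waiter has registered */
	wake_all(&b);		/* slow path: take the lock and broadcast */

	pthread_join(t, NULL);
	puts("waiter woken");
	return 0;
}

Build with cc -pthread; the first wake_all() returns without ever touching the mutex, which is exactly the cheap check the counter exists to enable.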
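The refactored futex_detect_cmpxchg() keeps the old probe: cmpxchg_futex_value_locked() on a NULL address must fail, and the error code distinguishes a working implementation (-EFAULT) from a stubbed one (-ENOSYS). Because futex_cmpxchg_enabled also gates robust-list registration, the same classify-the-failure idea can be reused from userspace; the sketch below is an illustration under the assumptions in its comment, not code from this change.

/*
 * Userspace probe in the spirit of futex_detect_cmpxchg(): issue a call
 * that must fail and classify the error.  On kernels of this era,
 * sys_set_robust_list() returns -ENOSYS before validating its arguments
 * when futex_cmpxchg_enabled is 0, so an always-invalid length of 0
 * yields ENOSYS there and EINVAL where cmpxchg works.  No state changes
 * either way, since the call always fails.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* len == 0 never matches sizeof(struct robust_list_head) */
	long ret = syscall(SYS_set_robust_list, NULL, 0UL);

	if (ret == -1 && errno == ENOSYS)
		puts("futex cmpxchg not functional: robust lists unavailable");
	else if (ret == -1 && errno == EINVAL)
		puts("futex cmpxchg available: robust lists supported");
	else
		printf("unexpected result: ret=%ld errno=%d\n", ret, errno);
	return 0;
}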
