Diffstat (limited to 'kernel/jump_label.c')
-rw-r--r--	kernel/jump_label.c	76	+++++++++++++++++++++++++++++++++++++++++++++++-----------------------------
1 file changed, 47 insertions(+), 29 deletions(-)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 3218fa5688b9..6dc76b590703 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -131,13 +131,16 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
 	STATIC_KEY_CHECK_USE(key);
 	/*
 	 * Negative key->enabled has a special meaning: it sends
-	 * static_key_slow_inc() down the slow path, and it is non-zero
-	 * so it counts as "enabled" in jump_label_update().  Note that
-	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().
+	 *
+	 * The INT_MAX overflow condition is either used by the networking
+	 * code to reset or detected in the slow path of
+	 * static_key_slow_inc_cpuslocked().
 	 */
 	v = atomic_read(&key->enabled);
 	do {
-		if (v <= 0 || (v + 1) < 0)
+		if (v <= 0 || v == INT_MAX)
 			return false;
 	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
 
@@ -150,7 +153,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	lockdep_assert_cpus_held();
 
 	/*
-	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
 	 * later calls must wait for the first one to _finish_ the
 	 * jump_label_update() process.  At the same time, however,
 	 * the jump_label_update() call below wants to see
@@ -159,22 +162,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	if (static_key_fast_inc_not_disabled(key))
 		return true;
 
-	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		atomic_set(&key->enabled, -1);
+	guard(mutex)(&jump_label_mutex);
+	/* Try to mark it as 'enabling in progress'. */
+	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
 		jump_label_update(key);
 		/*
-		 * Ensure that if the above cmpxchg loop observes our positive
-		 * value, it must also observe all the text changes.
+		 * Ensure that when static_key_fast_inc_not_disabled() or
+		 * static_key_slow_try_dec() observe the positive value,
+		 * they must also observe all the text changes.
 		 */
 		atomic_set_release(&key->enabled, 1);
 	} else {
-		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
-			jump_label_unlock();
+		/*
+		 * While holding the mutex this should never observe
+		 * anything other than a value >= 1 and succeed.
+		 */
+		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
 			return false;
-		}
 	}
-	jump_label_unlock();
 	return true;
 }
 
@@ -231,7 +236,7 @@ void static_key_disable_cpuslocked(struct static_key *key)
 	}
 
 	jump_label_lock();
-	if (atomic_cmpxchg(&key->enabled, 1, 0))
+	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
 		jump_label_update(key);
 	jump_label_unlock();
 }
@@ -247,20 +252,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 static bool static_key_slow_try_dec(struct static_key *key)
 {
-	int val;
-
-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-	if (val == 1)
-		return false;
+	int v;
 
 	/*
-	 * The negative count check is valid even when a negative
-	 * key->enabled is in use by static_key_slow_inc(); a
-	 * __static_key_slow_dec() before the first static_key_slow_inc()
-	 * returns is unbalanced, because all other static_key_slow_inc()
-	 * instances block while the update is in progress.
+	 * Go into the slow path if key::enabled is less than or equal to
+	 * one. One is valid to shut down the key, anything less than one
+	 * is an imbalance, which is handled at the call site.
+	 *
+	 * That includes the special case of '-1' which is set in
+	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+	 * fully serialized in the slow path below. By the time this task
+	 * acquires the jump label lock the value is back to one and the
+	 * retry under the lock must succeed.
 	 */
-	WARN(val < 0, "jump label: negative count!\n");
+	v = atomic_read(&key->enabled);
+	do {
+		/*
+		 * Warn about the '-1' case though, since that means a
+		 * decrement is concurrent with a first (0->1) increment. IOW
+		 * people are trying to disable something that wasn't yet fully
+		 * enabled. This suggests an ordering problem on the user side.
+		 */
+		WARN_ON_ONCE(v < 0);
+		if (v <= 1)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
 	return true;
 }
 
@@ -271,10 +288,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 	if (static_key_slow_try_dec(key))
 		return;
 
-	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled))
+	guard(mutex)(&jump_label_mutex);
+	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
 		jump_label_update(key);
-	jump_label_unlock();
+	else
+		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
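Editor's note: the fast-path change above turns the increment into a lock-free
refcount that refuses to bump a disabled (<= 0) or saturated (INT_MAX) key.
Below is a minimal user-space sketch of the same pattern using C11 atomics; the
kernel's atomic_t/atomic_try_cmpxchg() API differs, and the bare atomic_int
counter and the name fast_inc_not_disabled() are illustrative stand-ins only.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for key->enabled (the kernel uses atomic_t). */
static atomic_int enabled;

/*
 * Sketch of the patched fast path: bump the count only while it is already
 * positive and not yet saturated at INT_MAX.  Like the kernel's
 * atomic_try_cmpxchg(), compare_exchange_weak() reloads 'v' on failure, so
 * the loop re-checks the guard conditions against the fresh value.
 */
static bool fast_inc_not_disabled(void)
{
	int v = atomic_load(&enabled);

	do {
		/* 0 is disabled, negative means an update is in progress. */
		if (v <= 0 || v == INT_MAX)
			return false;
	} while (!atomic_compare_exchange_weak(&enabled, &v, v + 1));

	return true;
}

Checking v == INT_MAX replaces the old (v + 1) < 0 test, which relied on signed
overflow; saturating at INT_MAX keeps the counter from ever wrapping into the
negative range that the slow path reserves as an "update in progress" marker.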

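The decrement side combines the same cmpxchg retry loop with a mutex-protected
slow path for the final 1 -> 0 transition. A self-contained sketch under the
same assumptions: C11 atomics, a pthread mutex standing in for
jump_label_mutex (the kernel's guard(mutex) helper auto-unlocks at scope exit,
which the explicit lock/unlock pairs below only approximate), and a hypothetical
update_code() standing in for jump_label_update().

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int enabled = 1;		/* assume the key was enabled once */
static pthread_mutex_t slow_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_code(void) { /* stands in for jump_label_update() */ }

/* Fast path: drop the count lock-free while it stays above 1. */
static bool slow_try_dec(void)
{
	int v = atomic_load(&enabled);

	do {
		if (v < 0)	/* decrement racing a first 0->1 enable */
			fprintf(stderr, "imbalanced dec\n");
		if (v <= 1)	/* 1 -> 0 must patch code: take slow path */
			return false;
	} while (!atomic_compare_exchange_weak(&enabled, &v, v - 1));

	return true;
}

static void slow_dec(void)
{
	if (slow_try_dec())
		return;

	pthread_mutex_lock(&slow_lock);
	int expected = 1;
	if (atomic_compare_exchange_strong(&enabled, &expected, 0))
		update_code();		/* we performed the 1 -> 0 transition */
	else if (!slow_try_dec())	/* a racing inc raised it above 1 */
		fprintf(stderr, "imbalanced dec\n");
	pthread_mutex_unlock(&slow_lock);
}

The retry under the lock is what makes concurrent inc/dec safe here: whichever
task wins the cmpxchg on 1 does the code patching, and the loser merely adjusts
the count, mirroring the patch's WARN_ON_ONCE(!static_key_slow_try_dec(key))
fallback in __static_key_slow_dec_cpuslocked().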