diff options
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c | 73
1 files changed, 42 insertions, 31 deletions
| diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a54ff8949f91..cc808707d1cf 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -231,9 +231,6 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,  	 */  	rcu_assign_pointer(old_tbl->future_tbl, new_tbl); -	/* Ensure the new table is visible to readers. */ -	smp_wmb(); -  	spin_unlock_bh(old_tbl->locks);  	return 0; @@ -389,33 +386,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,  	return false;  } -int rhashtable_insert_rehash(struct rhashtable *ht) +int rhashtable_insert_rehash(struct rhashtable *ht, +			     struct bucket_table *tbl)  {  	struct bucket_table *old_tbl;  	struct bucket_table *new_tbl; -	struct bucket_table *tbl;  	unsigned int size;  	int err;  	old_tbl = rht_dereference_rcu(ht->tbl, ht); -	tbl = rhashtable_last_table(ht, old_tbl);  	size = tbl->size; +	err = -EBUSY; +  	if (rht_grow_above_75(ht, tbl))  		size *= 2;  	/* Do not schedule more than one rehash */  	else if (old_tbl != tbl) -		return -EBUSY; +		goto fail; + +	err = -ENOMEM;  	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); -	if (new_tbl == NULL) { -		/* Schedule async resize/rehash to try allocation -		 * non-atomic context. -		 */ -		schedule_work(&ht->run_work); -		return -ENOMEM; -	} +	if (new_tbl == NULL) +		goto fail;  	err = rhashtable_rehash_attach(ht, tbl, new_tbl);  	if (err) { @@ -426,12 +421,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)  		schedule_work(&ht->run_work);  	return err; + +fail: +	/* Do not fail the insert if someone else did a rehash. */ +	if (likely(rcu_dereference_raw(tbl->future_tbl))) +		return 0; + +	/* Schedule async rehash to retry allocation in process context. 
*/ +	if (err == -ENOMEM) +		schedule_work(&ht->run_work); + +	return err;  }  EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); -int rhashtable_insert_slow(struct rhashtable *ht, const void *key, -			   struct rhash_head *obj, -			   struct bucket_table *tbl) +struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, +					    const void *key, +					    struct rhash_head *obj, +					    struct bucket_table *tbl)  {  	struct rhash_head *head;  	unsigned int hash; @@ -467,7 +474,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,  exit:  	spin_unlock(rht_bucket_lock(tbl, hash)); -	return err; +	if (err == 0) +		return NULL; +	else if (err == -EAGAIN) +		return tbl; +	else +		return ERR_PTR(err);  }  EXPORT_SYMBOL_GPL(rhashtable_insert_slow); @@ -503,10 +515,11 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)  	if (!iter->walker)  		return -ENOMEM; -	mutex_lock(&ht->mutex); -	iter->walker->tbl = rht_dereference(ht->tbl, ht); +	spin_lock(&ht->lock); +	iter->walker->tbl = +		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));  	list_add(&iter->walker->list, &iter->walker->tbl->walkers); -	mutex_unlock(&ht->mutex); +	spin_unlock(&ht->lock);  	return 0;  } @@ -520,10 +533,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);   */  void rhashtable_walk_exit(struct rhashtable_iter *iter)  { -	mutex_lock(&iter->ht->mutex); +	spin_lock(&iter->ht->lock);  	if (iter->walker->tbl)  		list_del(&iter->walker->list); -	mutex_unlock(&iter->ht->mutex); +	spin_unlock(&iter->ht->lock);  	kfree(iter->walker);  }  EXPORT_SYMBOL_GPL(rhashtable_walk_exit); @@ -547,14 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)  {  	struct rhashtable *ht = iter->ht; -	mutex_lock(&ht->mutex); +	rcu_read_lock(); +	spin_lock(&ht->lock);  	if (iter->walker->tbl)  		list_del(&iter->walker->list); - -	rcu_read_lock(); - -	mutex_unlock(&ht->mutex); +	spin_unlock(&ht->lock);  	if (!iter->walker->tbl) {  		iter->walker->tbl = 
rht_dereference_rcu(ht->tbl, ht); @@ -723,9 +734,6 @@ int rhashtable_init(struct rhashtable *ht,  	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))  		return -EINVAL; -	if (params->nelem_hint) -		size = rounded_hashtable_size(params); -  	memset(ht, 0, sizeof(*ht));  	mutex_init(&ht->mutex);  	spin_lock_init(&ht->lock); @@ -745,6 +753,9 @@ int rhashtable_init(struct rhashtable *ht,  	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); +	if (params->nelem_hint) +		size = rounded_hashtable_size(&ht->p); +  	/* The maximum (not average) chain length grows with the  	 * size of the hash table, at a rate of (log N)/(log log N).  	 * The value of 16 is selected so that even if the hash | 
