Diffstat (limited to 'lib/xarray.c')

 lib/xarray.c | 157 ++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 145 insertions(+), 12 deletions(-)
diff --git a/lib/xarray.c b/lib/xarray.c
index 116e9286c64e..9644b18af18d 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -278,6 +278,7 @@ void xas_destroy(struct xa_state *xas)
 		xas->xa_alloc = node = next;
 	}
 }
+EXPORT_SYMBOL_GPL(xas_destroy);
 
 /**
  * xas_nomem() - Allocate memory if needed.
@@ -1007,6 +1008,26 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
 	}
 }
 
+static void __xas_init_node_for_split(struct xa_state *xas,
+		struct xa_node *node, void *entry)
+{
+	unsigned int i;
+	void *sibling = NULL;
+	unsigned int mask = xas->xa_sibs;
+
+	if (!node)
+		return;
+	node->array = xas->xa;
+	for (i = 0; i < XA_CHUNK_SIZE; i++) {
+		if ((i & mask) == 0) {
+			RCU_INIT_POINTER(node->slots[i], entry);
+			sibling = xa_mk_sibling(i);
+		} else {
+			RCU_INIT_POINTER(node->slots[i], sibling);
+		}
+	}
+}
+
 /**
  * xas_split_alloc() - Allocate memory for splitting an entry.
  * @xas: XArray operation state.
@@ -1025,7 +1046,6 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 		gfp_t gfp)
 {
 	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
-	unsigned int mask = xas->xa_sibs;
 
 	/* XXX: no support for splitting really large entries yet */
 	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
@@ -1034,22 +1054,13 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 		return;
 
 	do {
-		unsigned int i;
-		void *sibling = NULL;
 		struct xa_node *node;
 
 		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		if (!node)
 			goto nomem;
-		node->array = xas->xa;
-		for (i = 0; i < XA_CHUNK_SIZE; i++) {
-			if ((i & mask) == 0) {
-				RCU_INIT_POINTER(node->slots[i], entry);
-				sibling = xa_mk_sibling(i);
-			} else {
-				RCU_INIT_POINTER(node->slots[i], sibling);
-			}
-		}
+
+		__xas_init_node_for_split(xas, node, entry);
 		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
 		xas->xa_alloc = node;
 	} while (sibs-- > 0);
@@ -1122,6 +1133,128 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
 	xas_update(xas, node);
 }
 EXPORT_SYMBOL_GPL(xas_split);
+
+/**
+ * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
+ * @order: Current entry order.
+ *
+ * xas_try_split() can split a multi-index entry to an order smaller than
+ * @order - 1 when no new xa_node is needed. This function provides the
+ * minimal order xas_try_split() supports.
+ *
+ * Return: The minimal order xas_try_split() supports.
+ *
+ * Context: Any context.
+ *
+ */
+unsigned int xas_try_split_min_order(unsigned int order)
+{
+	if (order % XA_CHUNK_SHIFT == 0)
+		return order == 0 ? 0 : order - 1;
+
+	return order - (order % XA_CHUNK_SHIFT);
+}
+EXPORT_SYMBOL_GPL(xas_try_split_min_order);
+
+/**
+ * xas_try_split() - Try to split a multi-index entry.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: Current entry order.
+ *
+ * The size of the new entries is set in @xas.  The value in @entry is
+ * copied to all the replacement entries. If and only if one new xa_node is
+ * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is
+ * NULL. If more new xa_nodes are needed, the function fails with -EINVAL.
+ *
+ * NOTE: use xas_try_split_min_order() to get the next split order instead
+ * of @order - 1 if you want to minimize xas_try_split() calls.
+ *
+ * Context: Any context.  The caller should hold the xa_lock.
+ */
+void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+	unsigned int offset, marks;
+	struct xa_node *node;
+	void *curr = xas_load(xas);
+	int values = 0;
+	gfp_t gfp = GFP_NOWAIT;
+
+	node = xas->xa_node;
+	if (xas_top(node))
+		return;
+
+	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+		gfp |= __GFP_ACCOUNT;
+
+	marks = node_get_marks(node, xas->xa_offset);
+
+	offset = xas->xa_offset + sibs;
+
+	if (xas->xa_shift < node->shift) {
+		struct xa_node *child = xas->xa_alloc;
+		unsigned int expected_sibs =
+			(1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1;
+
+		/*
+		 * No support for splitting sibling entries
+		 * (horizontally) or cascade splits (vertically):
+		 * either would require two or more new xa_nodes,
+		 * and if one xa_node allocation fails, it is
+		 * hard to free the prior allocations.
+		 */
+		if (sibs || xas->xa_sibs != expected_sibs) {
+			xas_destroy(xas);
+			xas_set_err(xas, -EINVAL);
+			return;
+		}
+
+		if (!child) {
+			child = kmem_cache_alloc_lru(radix_tree_node_cachep,
+						     xas->xa_lru, gfp);
+			if (!child) {
+				xas_destroy(xas);
+				xas_set_err(xas, -ENOMEM);
+				return;
+			}
+			RCU_INIT_POINTER(child->parent, xas->xa_alloc);
+		}
+		__xas_init_node_for_split(xas, child, entry);
+
+		xas->xa_alloc = rcu_dereference_raw(child->parent);
+		child->shift = node->shift - XA_CHUNK_SHIFT;
+		child->offset = offset;
+		child->count = XA_CHUNK_SIZE;
+		child->nr_values = xa_is_value(entry) ?
+				XA_CHUNK_SIZE : 0;
+		RCU_INIT_POINTER(child->parent, node);
+		node_set_marks(node, offset, child, xas->xa_sibs,
+				marks);
+		rcu_assign_pointer(node->slots[offset],
+				xa_mk_node(child));
+		if (xa_is_value(curr))
+			values--;
+		xas_update(xas, child);
+
+	} else {
+		do {
+			unsigned int canon = offset - xas->xa_sibs;
+
+			node_set_marks(node, canon, NULL, 0, marks);
+			rcu_assign_pointer(node->slots[canon], entry);
+			while (offset > canon)
+				rcu_assign_pointer(node->slots[offset--],
+						xa_mk_sibling(canon));
+			values += (xa_is_value(entry) - xa_is_value(curr)) *
+					(xas->xa_sibs + 1);
+		} while (offset-- > xas->xa_offset);
+	}
+
+	node->nr_values += values;
+	xas_update(xas, node);
+}
+EXPORT_SYMBOL_GPL(xas_try_split);
 #endif
 
 /**
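To make xas_try_split_min_order()'s stepping rule concrete: with XA_CHUNK_SHIFT == 6 (its usual value when CONFIG_BASE_SMALL is not set), an order-9 entry is reduced as 9 -> 6 -> 5 -> 0 rather than one order at a time. The user-space sketch below is not part of the patch; it merely replays the same arithmetic so the sequence can be printed.

/*
 * Illustrative only: a user-space copy of xas_try_split_min_order()
 * from the patch, with XA_CHUNK_SHIFT assumed to be 6.
 */
#include <stdio.h>

#define XA_CHUNK_SHIFT	6

static unsigned int try_split_min_order(unsigned int order)
{
	if (order % XA_CHUNK_SHIFT == 0)
		return order == 0 ? 0 : order - 1;

	return order - (order % XA_CHUNK_SHIFT);
}

int main(void)
{
	unsigned int order = 9;

	/* Prints: 9 -> 6, 6 -> 5, 5 -> 0. */
	while (order > 0) {
		unsigned int next = try_split_min_order(order);

		printf("split order %u -> %u\n", order, next);
		order = next;
	}
	return 0;
}

Each step either crosses a node boundary (6 -> 5, the one case that needs a new child xa_node) or only rewrites sibling slots within one node (9 -> 6 and 5 -> 0), which is why a single xas_try_split() call never needs more than one allocation.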

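As a usage sketch, here is how a caller might drive the new API to shrink a multi-index entry to a target order. split_entry_to_order() is hypothetical, not part of the patch, and assumes the xas_try_split_min_order() declaration from the matching include/linux/xarray.h change.

/*
 * Hypothetical helper, a sketch only: shrink the multi-index entry at
 * @index from @order down to @new_order, one xas_try_split() call per
 * step. xas_try_split() requires the xa_lock; here it is held across
 * the whole loop.
 */
#include <linux/xarray.h>

static int split_entry_to_order(struct xarray *xa, unsigned long index,
				void *entry, unsigned int order,
				unsigned int new_order)
{
	XA_STATE(xas, xa, index);
	int err = 0;

	xas_lock(&xas);
	while (order > new_order) {
		/* Largest step xas_try_split() can take from @order. */
		unsigned int next = xas_try_split_min_order(order);

		if (next < new_order)
			next = new_order;

		xas_set_order(&xas, index, next);
		xas_try_split(&xas, entry, order);
		err = xas_error(&xas);
		if (err)
			break;
		order = next;
	}
	xas_unlock(&xas);

	return err;
}

On failure, xas_error() reports -ENOMEM (the single GFP_NOWAIT allocation failed) or -EINVAL (the requested step would need more than one new xa_node); splits completed in earlier iterations remain in place, so a caller can retry or fall back to xas_split_alloc() plus xas_split().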