author     Richard Braun <rbraun@sceen.net>  2011-06-28 09:56:33 +0200
committer  Richard Braun <rbraun@sceen.net>  2011-06-28 09:56:33 +0200
commit     ca10664c442b660e9231ee357f8979888be3e185
tree       23ad4e5d752f0803558c30d465853fcd4acbdcfc

Initial commit.
-rw-r--r--   .gitignore                              2
-rw-r--r--   CMakeLists.txt                         93
-rw-r--r--   LICENSE                                22
-rw-r--r--   avltree.c                             533
-rw-r--r--   avltree.h                             305
-rw-r--r--   avltree_i.h                           196
-rw-r--r--   cpu.h                                  82
-rw-r--r--   error.c                                91
-rw-r--r--   error.h                                84
-rw-r--r--   hash.h                                107
-rw-r--r--   list.c                                135
-rw-r--r--   list.h                                371
-rw-r--r--   macros.h                               68
-rw-r--r--   mem.c                                1785
-rw-r--r--   mem.h                                 120
-rw-r--r--   mem_malloc.c                          148
-rw-r--r--   mem_malloc.h                           42
-rw-r--r--   phys.c                                754
-rw-r--r--   phys.h                                 61
-rw-r--r--   rbtree.c                              486
-rw-r--r--   rbtree.h                              304
-rw-r--r--   rbtree_i.h                            189
-rw-r--r--   rdatree.c                             657
-rw-r--r--   rdatree.h                             150
-rw-r--r--   test/test_avltree.c                   132
-rw-r--r--   test/test_mem.c                        56
-rw-r--r--   test/test_mem_cache.c                 142
-rw-r--r--   test/test_mem_cache_double_free.c      73
-rw-r--r--   test/test_mem_cache_invalid_free.c     72
-rw-r--r--   test/test_mem_cache_write_beyond.c     87
-rw-r--r--   test/test_mem_cache_write_buftag.c     89
-rw-r--r--   test/test_mem_cache_write_free.c      103
-rw-r--r--   test/test_mem_offbyone.c               57
-rw-r--r--   test/test_phys.c                       63
-rw-r--r--   test/test_rbtree.c                    117
-rw-r--r--   test/test_rdatree.c                   619
-rw-r--r--   test/test_xprintf.c                   297
-rw-r--r--   xprintf.c                             583
-rw-r--r--   xprintf.h                              58

39 files changed, 9333 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..332b722
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+build/
+tags
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..c9e24b6
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,93 @@
+project(LIBBRAUNR)
+cmake_minimum_required(VERSION 2.6)
+
+include(TestBigEndian)
+
+set(CMAKE_VERBOSE_MAKEFILE OFF)
+set(CMAKE_BUILD_TYPE RelWithDebInfo)
+#set(CMAKE_BUILD_TYPE Debug)
+set(BUILD_SHARED_LIBS TRUE)
+test_big_endian(HOST_BIG_ENDIAN)
+
+add_definitions(-Wall -Wextra -Wmissing-prototypes)
+add_definitions(-D_HOST_BIG_ENDIAN=${HOST_BIG_ENDIAN})
+
+set(CONFIG_NR_CPUS 8)
+set(CONFIG_CPU_L1_SHIFT 6)
+set(CONFIG_MEM_MALLOC 0)
+set(CONFIG_MEM_USE_PHYS 0)
+set(CONFIG_MEM_VERIFY 0)
+
+set(BRAUNR_SOURCES
+ error.c
+ list.c
+ avltree.c
+ rbtree.c
+ rdatree.c
+ mem.c
+ phys.c
+ xprintf.c
+)
+
+set(LIBS -lpthread -lrt)
+
+add_definitions(-DCONFIG_NR_CPUS=${CONFIG_NR_CPUS})
+add_definitions(-DCONFIG_CPU_L1_SHIFT=${CONFIG_CPU_L1_SHIFT})
+
+if(CONFIG_MEM_MALLOC)
+ add_definitions(-DCONFIG_MEM_MALLOC)
+ set(BRAUNR_SOURCES ${BRAUNR_SOURCES} mem_malloc.c)
+endif(CONFIG_MEM_MALLOC)
+
+if(CONFIG_MEM_USE_PHYS)
+ add_definitions(-DCONFIG_MEM_USE_PHYS)
+endif(CONFIG_MEM_USE_PHYS)
+
+if(CONFIG_MEM_VERIFY)
+ add_definitions(-DCONFIG_MEM_VERIFY)
+endif(CONFIG_MEM_VERIFY)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+add_library(braunr ${BRAUNR_SOURCES})
+target_link_libraries(braunr ${LIBS})
+set_target_properties(braunr PROPERTIES VERSION 1)
+
+add_executable(test_avltree test/test_avltree.c)
+target_link_libraries(test_avltree braunr ${LIBS})
+
+add_executable(test_rbtree test/test_rbtree.c)
+target_link_libraries(test_rbtree braunr ${LIBS})
+
+add_executable(test_rdatree test/test_rdatree.c)
+target_link_libraries(test_rdatree braunr ${LIBS})
+
+add_executable(test_mem_cache test/test_mem_cache.c)
+target_link_libraries(test_mem_cache braunr ${LIBS})
+
+add_executable(test_mem test/test_mem.c)
+target_link_libraries(test_mem braunr ${LIBS})
+
+add_executable(test_mem_offbyone test/test_mem_offbyone.c)
+target_link_libraries(test_mem_offbyone braunr ${LIBS})
+
+add_executable(test_mem_cache_invalid_free test/test_mem_cache_invalid_free.c)
+target_link_libraries(test_mem_cache_invalid_free braunr ${LIBS})
+
+add_executable(test_mem_cache_double_free test/test_mem_cache_double_free.c)
+target_link_libraries(test_mem_cache_double_free braunr ${LIBS})
+
+add_executable(test_mem_cache_write_free test/test_mem_cache_write_free.c)
+target_link_libraries(test_mem_cache_write_free braunr ${LIBS})
+
+add_executable(test_mem_cache_write_beyond test/test_mem_cache_write_beyond.c)
+target_link_libraries(test_mem_cache_write_beyond braunr ${LIBS})
+
+add_executable(test_mem_cache_write_buftag test/test_mem_cache_write_buftag.c)
+target_link_libraries(test_mem_cache_write_buftag braunr ${LIBS})
+
+add_executable(test_phys test/test_phys.c)
+target_link_libraries(test_phys braunr ${LIBS})
+
+add_executable(test_xprintf test/test_xprintf.c)
+target_link_libraries(test_xprintf braunr ${LIBS})
+set_target_properties(test_xprintf PROPERTIES COMPILE_FLAGS -Wno-format)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..54e10b0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2009, 2010, 2011 Richard Braun.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/avltree.c b/avltree.c
new file mode 100644
index 0000000..3782fb5
--- /dev/null
+++ b/avltree.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "macros.h"
+#include "avltree.h"
+#include "avltree_i.h"
+
+/*
+ * Return the index of a node in the children array of its parent.
+ *
+ * The parent parameter must not be null, and must be the parent of the
+ * given node.
+ */
+static inline int avltree_index(const struct avltree_node *node,
+ const struct avltree_node *parent)
+{
+ assert(parent != NULL);
+ assert((node == NULL) || (avltree_parent(node) == parent));
+
+ if (parent->children[AVLTREE_LEFT] == node)
+ return AVLTREE_LEFT;
+
+ assert(parent->children[AVLTREE_RIGHT] == node);
+
+ return AVLTREE_RIGHT;
+}
+
+/*
+ * Convert an index of the children array (0 or 1) into a balance value
+ * (-1 or 1).
+ */
+static inline int avltree_i2b(int index)
+{
+ assert(avltree_check_index(index));
+ return (index - 1) | 1;
+}
+
+/*
+ * Return the balance of a node.
+ */
+static inline int avltree_balance(const struct avltree_node *node)
+{
+ int balance;
+
+ balance = (node->parent & AVLTREE_BALANCE_MASK);
+ assert(balance != AVLTREE_BALANCE_INVALID);
+
+ return balance - 1;
+}
+
+/*
+ * Set the parent of a node, retaining its current balance.
+ */
+static inline void avltree_set_parent(struct avltree_node *node,
+ struct avltree_node *parent)
+{
+ assert(avltree_check_alignment(node));
+ assert(avltree_check_alignment(parent));
+ node->parent = (unsigned long)parent
+ | (node->parent & AVLTREE_BALANCE_MASK);
+}
+
+/*
+ * Set the balance of a node, retaining its current parent.
+ */
+static inline void avltree_set_balance(struct avltree_node *node, int balance)
+{
+ assert((-1 <= balance) && (balance <= 1));
+ node->parent = (node->parent & AVLTREE_PARENT_MASK) | (balance + 1);
+}
+
+/*
+ * Rotate an unbalanced tree.
+ *
+ * This function is called after the insertion or the removal of a node made
+ * a tree unbalanced. The node and balance parameters define the rotation root
+ * (the balance being either -2 or 2).
+ *
+ * Return true if the overall height of the subtree rooted at node has
+ * decreased, false otherwise.
+ */
+static int avltree_rotate(struct avltree *tree, struct avltree_node *node,
+ int balance)
+{
+ struct avltree_node *parent, *lnode, *lrnode, *lrlnode, *lrrnode;
+ int left, right, lweight, rweight, lbalance;
+
+ /*
+ * Silence gcc warning.
+ */
+ int index = index;
+
+ assert((balance == -2) || (balance == 2));
+
+ left = avltree_d2i(balance);
+ right = 1 - left;
+ lweight = balance >> 1;
+ rweight = -lweight;
+
+ parent = avltree_parent(node);
+
+ if (likely(parent != NULL))
+ index = avltree_index(node, parent);
+
+ lnode = node->children[left];
+ assert(lnode != NULL);
+ lbalance = avltree_balance(lnode);
+ lrnode = lnode->children[right];
+
+ /*
+ * Left-left case. Note that only a removal can set the left node balance
+ * to 0 when rotating.
+ */
+ if (lbalance != rweight) {
+ node->children[left] = lrnode;
+
+ if (lrnode != NULL)
+ avltree_set_parent(lrnode, node);
+
+ lbalance += rweight;
+
+ lnode->children[right] = node;
+
+ avltree_set_parent(node, lnode);
+ avltree_set_balance(node, -lbalance);
+
+ avltree_set_parent(lnode, parent);
+ avltree_set_balance(lnode, lbalance);
+
+ if (unlikely(parent == NULL))
+ tree->root = lnode;
+ else
+ parent->children[index] = lnode;
+
+ /*
+ * If the adjusted balance is now 0, it means that the height of the
+ * left subtree has decreased.
+ */
+ return (lbalance == 0);
+ }
+
+ /*
+ * Left-right case.
+ */
+
+ assert(lrnode != NULL);
+ lrlnode = lrnode->children[left];
+ lrrnode = lrnode->children[right];
+
+ node->children[left] = lrrnode;
+
+ if (lrrnode != NULL)
+ avltree_set_parent(lrrnode, node);
+
+ lnode->children[right] = lrlnode;
+
+ if (lrlnode != NULL)
+ avltree_set_parent(lrlnode, lnode);
+
+ balance = avltree_balance(lrnode);
+
+ lrnode->children[left] = lnode;
+ avltree_set_parent(lnode, lrnode);
+ avltree_set_balance(lnode, ((balance == rweight) ? lweight : 0));
+
+ lrnode->children[right] = node;
+ avltree_set_parent(node, lrnode);
+ avltree_set_balance(node, ((balance == lweight) ? rweight : 0));
+
+ avltree_set_parent(lrnode, parent);
+ avltree_set_balance(lrnode, 0);
+
+ if (unlikely(parent == NULL))
+ tree->root = lrnode;
+ else
+ parent->children[index] = lrnode;
+
+ /*
+ * The balance of the new subtree root is always 0 in this case, which
+ * implies the overall subtree height has decreased.
+ */
+ return 1;
+}
+
+void avltree_insert_rebalance(struct avltree *tree, struct avltree_node *parent,
+ int index, struct avltree_node *node)
+{
+ int old_balance, new_balance;
+
+ assert(avltree_check_alignment(parent));
+ assert(avltree_check_alignment(node));
+
+ node->parent = (unsigned long)parent | AVLTREE_BALANCE_ZERO;
+ node->children[AVLTREE_LEFT] = NULL;
+ node->children[AVLTREE_RIGHT] = NULL;
+
+ /*
+ * The node is the tree root, and is the single tree entry.
+ */
+ if (parent == NULL) {
+ assert(tree->root == NULL);
+ tree->root = node;
+ return;
+ }
+
+ assert(avltree_check_index(index));
+ assert(parent->children[index] == NULL);
+ parent->children[index] = node;
+
+ for (;;) {
+ node = parent;
+
+ old_balance = avltree_balance(node);
+ new_balance = old_balance + avltree_i2b(index);
+
+ /*
+ * Perfect balance, stop now.
+ */
+ if (new_balance == 0) {
+ avltree_set_balance(node, 0);
+ return;
+ }
+
+ /*
+ * Both previous and new balances are different from 0, which means the
+ * new one has reached -2 or 2. Rebalance now.
+ */
+ if (old_balance != 0)
+ break;
+
+ /*
+ * The new balance is either -1 or 1. Update the current node and
+ * iterate again to propagate the height change.
+ */
+ avltree_set_balance(node, new_balance);
+ parent = avltree_parent(node);
+
+ /*
+ * The tree root was reached.
+ */
+ if (parent == NULL)
+ return;
+
+ index = avltree_index(node, parent);
+ }
+
+ avltree_rotate(tree, node, new_balance);
+}
+
+void avltree_remove(struct avltree *tree, struct avltree_node *node)
+{
+ struct avltree_node *child, *parent;
+ int left, right, index, old_balance, new_balance;
+
+ if (node->children[AVLTREE_LEFT] == NULL)
+ child = node->children[AVLTREE_RIGHT];
+ else if (node->children[AVLTREE_RIGHT] == NULL)
+ child = node->children[AVLTREE_LEFT];
+ else {
+ struct avltree_node *successor;
+
+ /*
+ * Two-children case: replace the node with one of its successors. The
+ * choice of the successor depends on the balance of the node to remove,
+ * as swapping with a node in a heavier subtree can reduce the number of
+ * rotations needed to rebalance the tree.
+ */
+
+ old_balance = avltree_balance(node);
+ right = avltree_d2i(old_balance);
+ left = 1 - right;
+
+ successor = node->children[right];
+
+ while (successor->children[left] != NULL)
+ successor = successor->children[left];
+
+ child = successor->children[right];
+ parent = avltree_parent(node);
+
+ if (unlikely(parent == NULL))
+ tree->root = successor;
+ else
+ parent->children[avltree_index(node, parent)] = successor;
+
+ parent = avltree_parent(successor);
+ index = avltree_index(successor, parent);
+
+ /*
+ * Set parent directly to keep the original balance.
+ */
+ successor->parent = node->parent;
+ successor->children[left] = node->children[left];
+ avltree_set_parent(successor->children[left], successor);
+
+ if (node == parent)
+ parent = successor;
+ else {
+ successor->children[right] = node->children[right];
+ avltree_set_parent(successor->children[right], successor);
+ parent->children[left] = child;
+
+ if (child != NULL)
+ avltree_set_parent(child, parent);
+ }
+
+ goto update_balance;
+ }
+
+ /*
+ * Node has at most one child.
+ */
+
+ parent = avltree_parent(node);
+
+ if (child != NULL)
+ avltree_set_parent(child, parent);
+
+ if (parent == NULL) {
+ tree->root = child;
+ return;
+ } else {
+ index = avltree_index(node, parent);
+ parent->children[index] = child;
+ }
+
+ /*
+ * The node has been removed, update the balance factors. At this point,
+ * the node parent and the index of the node in its parent children array
+ * must be valid.
+ */
+update_balance:
+ for (;;) {
+ node = parent;
+
+ old_balance = avltree_balance(node);
+ new_balance = old_balance - avltree_i2b(index);
+
+ /*
+ * The overall subtree height hasn't decreased, stop now.
+ */
+ if (old_balance == 0) {
+ avltree_set_balance(node, new_balance);
+ break;
+ }
+
+ parent = avltree_parent(node);
+
+ if (parent != NULL)
+ index = avltree_index(node, parent);
+
+ /*
+ * The overall subtree height has decreased. Update the current node and
+ * iterate again to propagate the change.
+ */
+ if (new_balance == 0)
+ avltree_set_balance(node, new_balance);
+ else {
+ int decreased;
+
+ /*
+ * The new balance is either -2 or 2. Rebalance the tree and exit
+ * the loop if the rotation hasn't decreased the overall tree
+ * height.
+ */
+
+ decreased = avltree_rotate(tree, node, new_balance);
+
+ if (!decreased)
+ break;
+ }
+
+ /*
+ * The tree root was reached.
+ */
+ if (parent == NULL)
+ break;
+ }
+}
+
+struct avltree_node * avltree_nearest(struct avltree_node *parent, int index,
+ int direction)
+{
+ assert(avltree_check_index(direction));
+
+ if (parent == NULL)
+ return NULL;
+
+ assert(avltree_check_index(index));
+
+ if (index != direction)
+ return parent;
+
+ return avltree_walk(parent, direction);
+}
+
+struct avltree_node * avltree_firstlast(const struct avltree *tree,
+ int direction)
+{
+ struct avltree_node *prev, *cur;
+
+ assert(avltree_check_index(direction));
+
+ prev = NULL;
+
+ for (cur = tree->root; cur != NULL; cur = cur->children[direction])
+ prev = cur;
+
+ return prev;
+}
+
+struct avltree_node * avltree_walk(struct avltree_node *node, int direction)
+{
+ int left, right;
+
+ assert(avltree_check_index(direction));
+
+ left = direction;
+ right = 1 - left;
+
+ if (node == NULL)
+ return NULL;
+
+ if (node->children[left] != NULL) {
+ node = node->children[left];
+
+ while (node->children[right] != NULL)
+ node = node->children[right];
+ } else {
+ struct avltree_node *parent;
+ int index;
+
+ for (;;) {
+ parent = avltree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = avltree_index(node, parent);
+ node = parent;
+
+ if (index == right)
+ break;
+ }
+ }
+
+ return node;
+}
+
+/*
+ * Return the left-most deepest child node of the given node.
+ */
+static struct avltree_node * avltree_find_deepest(struct avltree_node *node)
+{
+ struct avltree_node *parent;
+
+ assert(node != NULL);
+
+ for (;;) {
+ parent = node;
+ node = node->children[AVLTREE_LEFT];
+
+ if (node == NULL) {
+ node = parent->children[AVLTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+ }
+ }
+}
+
+struct avltree_node * avltree_postwalk_deepest(const struct avltree *tree)
+{
+ struct avltree_node *node;
+
+ node = tree->root;
+
+ if (node == NULL)
+ return NULL;
+
+ return avltree_find_deepest(node);
+}
+
+struct avltree_node * avltree_postwalk_unlink(struct avltree_node *node)
+{
+ struct avltree_node *parent;
+ int index;
+
+ if (node == NULL)
+ return NULL;
+
+ assert(node->children[AVLTREE_LEFT] == NULL);
+ assert(node->children[AVLTREE_RIGHT] == NULL);
+
+ parent = avltree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = avltree_index(node, parent);
+ parent->children[index] = NULL;
+ node = parent->children[AVLTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+
+ return avltree_find_deepest(node);
+}
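
For illustration only (not part of the diff above): the child-index/balance conversions used by the rotation code rely on small bit tricks. Below is a standalone sketch of what avltree_i2b() and avltree_d2i() compute, assuming two's complement integers.

#include <assert.h>

int main(void)
{
    /*
     * avltree_i2b(index): (index - 1) | 1 maps a child index to a balance
     * contribution. With two's complement, (0 - 1) | 1 is -1.
     */
    assert(((0 - 1) | 1) == -1);    /* AVLTREE_LEFT  (0) -> -1 */
    assert(((1 - 1) | 1) == 1);     /* AVLTREE_RIGHT (1) -> +1 */

    /*
     * avltree_d2i(diff): !(diff <= 0) maps a comparison result to a child
     * index, sending non-positive results to the left.
     */
    assert(!(-7 <= 0) == 0);        /* diff <= 0 -> AVLTREE_LEFT  (0) */
    assert(!(42 <= 0) == 1);        /* diff >  0 -> AVLTREE_RIGHT (1) */

    return 0;
}
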
diff --git a/avltree.h b/avltree.h
new file mode 100644
index 0000000..3f73e05
--- /dev/null
+++ b/avltree.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * AVL tree.
+ *
+ * This type of tree is well suited for lookup intensive applications. In
+ * the more common case where there can be as many insertions/removals as
+ * lookups, or the amount of changes can't be determined, red-black trees
+ * provide better average performance.
+ */
+
+#ifndef _AVLTREE_H
+#define _AVLTREE_H
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "macros.h"
+
+/*
+ * Indexes of the left and right nodes in the children array of a node.
+ */
+#define AVLTREE_LEFT 0
+#define AVLTREE_RIGHT 1
+
+/*
+ * AVL node.
+ */
+struct avltree_node;
+
+/*
+ * AVL tree.
+ */
+struct avltree;
+
+/*
+ * Static tree initializer.
+ */
+#define AVLTREE_INITIALIZER { NULL }
+
+#include "avltree_i.h"
+
+/*
+ * Initialize a tree.
+ */
+static inline void avltree_init(struct avltree *tree)
+{
+ tree->root = NULL;
+}
+
+/*
+ * Initialize a node.
+ *
+ * A node is in no tree when its parent points to itself.
+ */
+static inline void avltree_node_init(struct avltree_node *node)
+{
+ assert(avltree_check_alignment(node));
+
+ node->parent = (unsigned long)node | AVLTREE_BALANCE_ZERO;
+ node->children[AVLTREE_LEFT] = NULL;
+ node->children[AVLTREE_RIGHT] = NULL;
+}
+
+/*
+ * Return true if node is in no tree.
+ */
+static inline int avltree_node_unlinked(const struct avltree_node *node)
+{
+ return avltree_parent(node) == node;
+}
+
+/*
+ * Macro that evaluates to the address of the structure containing the
+ * given node based on the given type and member.
+ */
+#define avltree_entry(node, type, member) structof(node, type, member)
+
+/*
+ * Return true if tree is empty.
+ */
+static inline int avltree_empty(const struct avltree *tree)
+{
+ return tree->root == NULL;
+}
+
+/*
+ * Look up a node in a tree.
+ *
+ * Note that implementing the lookup algorithm as a macro gives two benefits:
+ * First, it avoids the overhead of a callback function. Next, the type of the
+ * cmp_fn parameter isn't rigid. The only guarantee offered by this
+ * implementation is that the key parameter is the first parameter given to
+ * cmp_fn. This way, users can pass only the value they need for comparison
+ * instead of e.g. allocating a full structure on the stack.
+ */
+#define avltree_lookup(tree, key, cmp_fn) \
+MACRO_BEGIN \
+ struct avltree_node *cur; \
+ int diff; \
+ \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(key, cur); \
+ \
+ if (diff == 0) \
+ break; \
+ \
+ cur = cur->children[avltree_d2i(diff)]; \
+ } \
+ \
+ cur; \
+MACRO_END
+
+/*
+ * Look up a node or one of its nearest nodes in a tree.
+ *
+ * This macro essentially acts as avltree_lookup() but if no entry matched
+ * the key, an additional step is performed to obtain the next or previous
+ * node, depending on the direction (left or right).
+ *
+ * The constraints that apply to the key parameter are the same as for
+ * avltree_lookup().
+ */
+#define avltree_lookup_nearest(tree, key, cmp_fn, dir) \
+MACRO_BEGIN \
+ struct avltree_node *cur, *prev; \
+ int diff, index; \
+ \
+ prev = NULL; \
+ index = 0; \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(key, cur); \
+ \
+ if (diff == 0) \
+ break; \
+ \
+ prev = cur; \
+ index = avltree_d2i(diff); \
+ cur = cur->children[index]; \
+ } \
+ \
+ if (cur == NULL) \
+ cur = avltree_nearest(prev, index, dir); \
+ \
+ cur; \
+MACRO_END
+
+/*
+ * Insert a node in a tree.
+ *
+ * This macro performs a standard lookup to obtain the insertion point of
+ * the given node in the tree (it is assumed that the inserted node never
+ * compares equal to any other entry in the tree) and links the node. It
+ * then updates and checks balances of the parent nodes, and rebalances the
+ * tree if necessary.
+ *
+ * Unlike avltree_lookup(), the cmp_fn parameter must compare two complete
+ * entries, so it is suggested to use two different comparison inline
+ * functions, such as myobj_cmp_lookup() and myobj_cmp_insert(). There is no
+ * guarantee about the order of the nodes given to the comparison function.
+ */
+#define avltree_insert(tree, node, cmp_fn) \
+MACRO_BEGIN \
+ struct avltree_node *cur, *prev; \
+ int diff, index; \
+ \
+ prev = NULL; \
+ index = 0; \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(node, cur); \
+ assert(diff != 0); \
+ prev = cur; \
+ index = avltree_d2i(diff); \
+ cur = cur->children[index]; \
+ } \
+ \
+ avltree_insert_rebalance(tree, prev, index, node); \
+MACRO_END
+
+/*
+ * Look up a node/slot pair in a tree.
+ *
+ * This macro essentially acts as avltree_lookup() but in addition to a node,
+ * it also returns a slot, which identifies an insertion point in the tree.
+ * If the returned node is null, the slot can be used by avltree_insert_slot()
+ * to insert without the overhead of an additional lookup. The slot is a
+ * simple unsigned long integer.
+ *
+ * The constraints that apply to the key parameter are the same as for
+ * avltree_lookup().
+ */
+#define avltree_lookup_slot(tree, key, cmp_fn, slot) \
+MACRO_BEGIN \
+ struct avltree_node *cur, *prev; \
+ int diff, index; \
+ \
+ prev = NULL; \
+ index = 0; \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(key, cur); \
+ \
+ if (diff == 0) \
+ break; \
+ \
+ prev = cur; \
+ index = avltree_d2i(diff); \
+ cur = cur->children[index]; \
+ } \
+ \
+ (slot) = avltree_slot(prev, index); \
+ cur; \
+MACRO_END
+
+/*
+ * Insert a node at an insertion point in a tree.
+ *
+ * This macro essentially acts as avltree_insert() except that it doesn't
+ * obtain the insertion point with a standard lookup. The insertion point
+ * is obtained by calling avltree_lookup_slot(). In addition, the new node
+ * must not compare equal to an existing node in the tree (i.e. the slot
+ * must denote a null node).
+ */
+#define avltree_insert_slot(tree, slot, node) \
+MACRO_BEGIN \
+ struct avltree_node *parent; \
+ int index; \
+ \
+ parent = avltree_slot_parent(slot); \
+ index = avltree_slot_index(slot); \
+ avltree_insert_rebalance(tree, parent, index, node); \
+MACRO_END
+
+/*
+ * Remove a node from a tree.
+ *
+ * After completion, the node is stale.
+ */
+void avltree_remove(struct avltree *tree, struct avltree_node *node);
+
+/*
+ * Return the first node of a tree.
+ */
+#define avltree_first(tree) avltree_firstlast(tree, AVLTREE_LEFT)
+
+/*
+ * Return the last node of a tree.
+ */
+#define avltree_last(tree) avltree_firstlast(tree, AVLTREE_RIGHT)
+
+/*
+ * Return the node previous to the given node.
+ */
+#define avltree_prev(node) avltree_walk(node, AVLTREE_LEFT)
+
+/*
+ * Return the node next to the given node.
+ */
+#define avltree_next(node) avltree_walk(node, AVLTREE_RIGHT)
+
+/*
+ * Forge a loop to process all nodes of a tree, removing them when visited.
+ *
+ * This macro can only be used to destroy a tree, so that the resources used
+ * by the entries can be released by the user. It basically removes all nodes
+ * without doing any balance checking.
+ *
+ * After completion, all nodes and the tree root member are stale.
+ */
+#define avltree_for_each_remove(tree, node, tmp) \
+for (node = avltree_postwalk_deepest(tree), \
+ tmp = avltree_postwalk_unlink(node); \
+ node != NULL; \
+ node = tmp, tmp = avltree_postwalk_unlink(node)) \
+
+#endif /* _AVLTREE_H */
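
For illustration only (not part of the diff above): a minimal usage sketch of this interface. The struct obj type and the obj_cmp_* helpers are hypothetical, and the sketch assumes MACRO_BEGIN/MACRO_END (from macros.h) expand to a GCC statement expression, as the trailing "cur;" in the macros suggests, so the lookup macros can be used as expressions.

#include <stdio.h>

#include "avltree.h"

/*
 * Hypothetical entry type embedding an AVL tree node, keyed by an integer.
 */
struct obj {
    struct avltree_node tree_node;
    int id;
};

/*
 * Insertion comparison function: both parameters are complete entries.
 */
static int obj_cmp_insert(struct avltree_node *a, struct avltree_node *b)
{
    return avltree_entry(a, struct obj, tree_node)->id
           - avltree_entry(b, struct obj, tree_node)->id;
}

/*
 * Lookup comparison function: the key alone is the first parameter, as
 * passed to avltree_lookup(), so no full entry is needed for lookups.
 */
static int obj_cmp_lookup(int id, struct avltree_node *node)
{
    return id - avltree_entry(node, struct obj, tree_node)->id;
}

int main(void)
{
    struct avltree tree;
    struct avltree_node *node, *tmp;
    struct obj objs[4], extra;
    unsigned long slot;
    int i;

    avltree_init(&tree);

    for (i = 0; i < 4; i++) {
        objs[i].id = i * 2;
        avltree_insert(&tree, &objs[i].tree_node, obj_cmp_insert);
    }

    /* Plain lookup by key. */
    node = avltree_lookup(&tree, 4, obj_cmp_lookup);

    if (node != NULL)
        printf("found %d\n", avltree_entry(node, struct obj, tree_node)->id);

    /* Lookup/insert without a second descent: reuse the slot on a miss. */
    node = avltree_lookup_slot(&tree, 5, obj_cmp_lookup, slot);

    if (node == NULL) {
        extra.id = 5;
        avltree_insert_slot(&tree, slot, &extra.tree_node);
    }

    /* In-order traversal. */
    for (node = avltree_first(&tree); node != NULL; node = avltree_next(node))
        printf("%d\n", avltree_entry(node, struct obj, tree_node)->id);

    /* Tear the tree down; the entries themselves need no release here. */
    avltree_for_each_remove(&tree, node, tmp);

    return 0;
}
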
diff --git a/avltree_i.h b/avltree_i.h
new file mode 100644
index 0000000..e09c7f4
--- /dev/null
+++ b/avltree_i.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AVLTREE_I_H
+#define _AVLTREE_I_H
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "macros.h"
+
+/*
+ * AVL node structure.
+ *
+ * To reduce the number of branches and the instruction cache footprint,
+ * the left and right child pointers are stored in an array, and the symmetry
+ * of most tree operations is exploited by using left/right variables when
+ * referring to children.
+ *
+ * In addition, this implementation assumes that all nodes are at least 4-byte
+ * aligned, so that the two least significant bits of the parent member can
+ * be used to store the balance of the node. This is true for all modern
+ * 32-bit and 64-bit architectures, as long as the nodes aren't embedded in
+ * structures with special alignment constraints such as member packing.
+ *
+ * To avoid issues with signed operations when handling a negative balance,
+ * the raw values stored in the parent member are always positive, and are
+ * actually one more than the value they represent. It is then easy to obtain
+ * a balance of -1 from a raw value of 0.
+ */
+struct avltree_node {
+ unsigned long parent;
+ struct avltree_node *children[2];
+};
+
+/*
+ * AVL tree structure.
+ */
+struct avltree {
+ struct avltree_node *root;
+};
+
+/*
+ * Masks applied on the parent member of a node to obtain either the
+ * balance or the parent address.
+ */
+#define AVLTREE_BALANCE_MASK 0x3UL
+#define AVLTREE_PARENT_MASK (~AVLTREE_BALANCE_MASK)
+
+/*
+ * Special raw balance values.
+ */
+#define AVLTREE_BALANCE_ZERO 1UL
+#define AVLTREE_BALANCE_INVALID AVLTREE_BALANCE_MASK
+
+/*
+ * Masks applied on slots to obtain either the child index or the parent
+ * address.
+ */
+#define AVLTREE_SLOT_INDEX_MASK 0x1UL
+#define AVLTREE_SLOT_PARENT_MASK (~AVLTREE_SLOT_INDEX_MASK)
+
+/*
+ * Return true if the given pointer is suitably aligned.
+ */
+static inline int avltree_check_alignment(const struct avltree_node *node)
+{
+ return ((unsigned long)node & AVLTREE_BALANCE_MASK) == 0;
+}
+
+/*
+ * Return true if the given index is a valid child index.
+ */
+static inline int avltree_check_index(int index)
+{
+ return index == (index & 1);
+}
+
+/*
+ * Convert the result of a comparison (or a balance) into an index in the
+ * children array (0 or 1).
+ *
+ * This function is mostly used when looking up a node.
+ */
+static inline int avltree_d2i(int diff)
+{
+ return !(diff <= 0);
+}
+
+/*
+ * Return the parent of a node.
+ */
+static inline struct avltree_node *
+avltree_parent(const struct avltree_node *node)
+{
+ return (struct avltree_node *)(node->parent & AVLTREE_PARENT_MASK);
+}
+
+/*
+ * Translate an insertion point into a slot.
+ */
+static inline unsigned long avltree_slot(struct avltree_node *parent, int index)
+{
+ assert(avltree_check_alignment(parent));
+ assert(avltree_check_index(index));
+ return (unsigned long)parent | index;
+}
+
+/*
+ * Extract the parent address from a slot.
+ */
+static inline struct avltree_node * avltree_slot_parent(unsigned long slot)
+{
+ return (struct avltree_node *)(slot & AVLTREE_SLOT_PARENT_MASK);
+}
+
+/*
+ * Extract the index from a slot.
+ */
+static inline int avltree_slot_index(unsigned long slot)
+{
+ return slot & AVLTREE_SLOT_INDEX_MASK;
+}
+
+/*
+ * Insert a node in a tree, rebalancing it if necessary.
+ *
+ * The index parameter is the index in the children array of the parent where
+ * the new node is to be inserted. It is ignored if the parent is null.
+ *
+ * This function is intended to be used by the avltree_insert() macro only.
+ */
+void avltree_insert_rebalance(struct avltree *tree, struct avltree_node *parent,
+ int index, struct avltree_node *node);
+
+/*
+ * Return the previous or next node relative to a location in a tree.
+ *
+ * The parent and index parameters define the location, which can be empty.
+ * The direction parameter is either AVLTREE_LEFT (to obtain the previous
+ * node) or AVLTREE_RIGHT (to obtain the next one).
+ */
+struct avltree_node * avltree_nearest(struct avltree_node *parent, int index,
+ int direction);
+
+/*
+ * Return the first or last node of a tree.
+ *
+ * The direction parameter is either AVLTREE_LEFT (to obtain the first node)
+ * or AVLTREE_RIGHT (to obtain the last one).
+ */
+struct avltree_node * avltree_firstlast(const struct avltree *tree,
+ int direction);
+
+/*
+ * Return the node next to, or previous to the given node.
+ *
+ * The direction parameter is either AVLTREE_LEFT (to obtain the previous node)
+ * or AVLTREE_RIGHT (to obtain the next one).
+ */
+struct avltree_node * avltree_walk(struct avltree_node *node, int direction);
+
+/*
+ * Return the left-most deepest node of a tree, which is the starting point of
+ * the postorder traversal performed by avltree_for_each_remove().
+ */
+struct avltree_node * avltree_postwalk_deepest(const struct avltree *tree);
+
+/*
+ * Unlink a node from its tree and return the next (right) node in postorder.
+ */
+struct avltree_node * avltree_postwalk_unlink(struct avltree_node *node);
+
+#endif /* _AVLTREE_I_H */
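
For illustration only (not part of the diff above): a sketch of the parent/balance packing described in the structure comment, using hypothetical, suitably aligned nodes. The raw balance stored in the two low bits is always the represented balance plus one.

#include <assert.h>
#include <stdio.h>

#include "avltree.h"

int main(void)
{
    /* Hypothetical nodes; any 4-byte (or stricter) alignment will do. */
    static struct avltree_node parent_node, node;
    unsigned long raw;

    assert(avltree_check_alignment(&parent_node));

    /*
     * Pack: parent address in the high bits, raw balance in the two low
     * bits. AVLTREE_BALANCE_ZERO (1) means a balance of 0, and a raw value
     * of 0 means a balance of -1.
     */
    raw = (unsigned long)&parent_node | AVLTREE_BALANCE_ZERO;
    node.parent = raw;

    assert(avltree_parent(&node) == &parent_node);
    assert((int)(node.parent & AVLTREE_BALANCE_MASK) - 1 == 0);

    /* Slots use the same trick: parent address plus a one-bit child index. */
    raw = avltree_slot(&parent_node, AVLTREE_RIGHT);
    assert(avltree_slot_parent(raw) == &parent_node);
    assert(avltree_slot_index(raw) == AVLTREE_RIGHT);

    printf("packing checks passed\n");
    return 0;
}
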
diff --git a/cpu.h b/cpu.h
new file mode 100644
index 0000000..df2fff4
--- /dev/null
+++ b/cpu.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPU_H
+#define _CPU_H
+
+#include <sched.h>
+
+#include "macros.h"
+
+/*
+ * Maximum number of supported processors.
+ */
+#define NR_CPUS CONFIG_NR_CPUS
+
+/*
+ * Filter invalid values.
+ *
+ * The maximum number of processors must be a power-of-two in order to avoid
+ * divisions.
+ */
+#if (NR_CPUS < 1) || !ISP2(NR_CPUS)
+#error "invalid number of configured processors"
+#endif
+
+/*
+ * L1 cache line shift and size.
+ */
+#define CPU_L1_SHIFT CONFIG_CPU_L1_SHIFT
+#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
+
+/*
+ * Return the ID of the currently running CPU.
+ *
+ * This implementation uses a glibc-specific function that relies on a
+ * Linux-specific system call to obtain the CPU ID. If this function fails
+ * (e.g. when run in valgrind), 0 is returned.
+ *
+ * The returned CPU ID cannot be greater than the maximum number of supported
+ * processors.
+ */
+static inline int cpu_id(void)
+{
+#if NR_CPUS == 1
+ return 0;
+#else
+ int id;
+
+ id = sched_getcpu();
+
+ if (id == -1)
+ return 0;
+ else if (id >= NR_CPUS)
+ id &= (NR_CPUS - 1);
+
+ return id;
+#endif
+}
+
+#endif /* _CPU_H */
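
For illustration only (not part of the diff above): one intended use of cpu_id(), NR_CPUS and CPU_L1_SIZE is per-processor data kept on separate cache lines. The counter below is hypothetical, the alignment attribute is a GCC extension, and CONFIG_NR_CPUS/CONFIG_CPU_L1_SHIFT are assumed to be defined at compile time, as the CMakeLists.txt above does with -D definitions.

#include <stdio.h>

#include "cpu.h"

/*
 * Hypothetical per-CPU counter, aligned to the L1 cache line size so that
 * processors never share a cache line (no false sharing).
 */
struct percpu_counter {
    unsigned long value;
} __attribute__((aligned(CPU_L1_SIZE)));

static struct percpu_counter counters[NR_CPUS];

static void counter_inc(void)
{
    counters[cpu_id()].value++;
}

static unsigned long counter_sum(void)
{
    unsigned long total;
    int i;

    total = 0;

    for (i = 0; i < NR_CPUS; i++)
        total += counters[i].value;

    return total;
}

int main(void)
{
    counter_inc();
    printf("total: %lu\n", counter_sum());
    return 0;
}
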
diff --git a/error.c b/error.c
new file mode 100644
index 0000000..116473d
--- /dev/null
+++ b/error.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "error.h"
+#include "macros.h"
+
+/*
+ * Error message table.
+ *
+ * This table must be consistent with the enum defined in error.h.
+ */
+static const char *errormsg_table[] = {
+ "success",
+ "unknown error",
+ "invalid argument",
+ "not enough space",
+ "invalid format",
+ "not enough resources",
+ "operation not permitted",
+ "resource busy",
+ "memory limit exceeded",
+ "operation timed out",
+ "operation would block",
+ "entry not found",
+ "internal memory allocator failure"
+};
+
+#define ERRORMSG_TABLE_SIZE ARRAY_SIZE(errormsg_table)
+
+const char * error_str(unsigned int error)
+{
+ if (error >= ERRORMSG_TABLE_SIZE)
+ return "invalid error code";
+
+ return errormsg_table[error];
+}
+
+unsigned int error_from_errno(int errno_code)
+{
+ switch (errno_code) {
+ case 0:
+ return ERR_SUCCESS;
+ case EINVAL:
+ return ERR_INVAL;
+ case ENOMEM:
+ return ERR_NOMEM;
+ case EAGAIN:
+ return ERR_NORES;
+ case EPERM:
+ return ERR_PERM;
+ case EBUSY:
+ return ERR_BUSY;
+ case ETIMEDOUT:
+ return ERR_TIMEDOUT;
+ default:
+ fprintf(stderr, "unable to translate errno code (%d)\n", errno_code);
+ return ERR_UNKNOWN;
+ }
+}
+
+void error_die(unsigned int error)
+{
+ fprintf(stderr, "process terminating, reason: %s\n", error_str(error));
+ abort();
+}
diff --git a/error.h b/error.h
new file mode 100644
index 0000000..81babf7
--- /dev/null
+++ b/error.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ERROR_H
+#define _ERROR_H
+
+#include "macros.h"
+
+/*
+ * List of errors this library can return.
+ *
+ * ERR_SUCCESS is guaranteed to be 0, allowing code such as :
+ *
+ * error = do_smth();
+ *
+ * if (error) {
+ * ...;
+ * }
+ */
+enum {
+ ERR_SUCCESS,
+ ERR_UNKNOWN,
+ ERR_INVAL,
+ ERR_NOMEM,
+ ERR_FORMAT,
+ ERR_NORES,
+ ERR_PERM,
+ ERR_BUSY,
+ ERR_MEMLIM,
+ ERR_TIMEDOUT,
+ ERR_WOULDBLOCK,
+ ERR_LOOKUP,
+ ERR_MEM_CACHE
+};
+
+/*
+ * Return the message matching the given error.
+ *
+ * The returned address points to a statically allocated, read only,
+ * null-terminated string literal. The caller must not attempt to use it
+ * for anything else than error reporting.
+ */
+const char * error_str(unsigned int error);
+
+/*
+ * Map standard error codes to error values.
+ *
+ * This function accepts a subset of the standard error codes in errno.h.
+ * When called, and if the errno value is handled, it will return the
+ * corresponding ERR_xxx code. Otherwise ERR_UNKNOWN is returned.
+ */
+unsigned int error_from_errno(int errno_code);
+
+/*
+ * Exit the current process, reporting an error.
+ *
+ * This function reports the given error and terminates the process by
+ * calling abort().
+ */
+void __noreturn error_die(unsigned int error);
+
+#endif /* _ERROR_H */
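
For illustration only (not part of the diff above): a typical way to combine error_from_errno(), error_str() and error_die() around a failing libc call. xmalloc() is a hypothetical wrapper, and it assumes malloc() sets errno to ENOMEM on failure (true on glibc).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "error.h"

/*
 * Hypothetical allocation wrapper: translate errno into a library error
 * code, report it, and terminate on failure.
 */
static void * xmalloc(size_t size)
{
    void *ptr;
    unsigned int error;

    ptr = malloc(size);

    if (ptr == NULL) {
        error = error_from_errno(errno);
        fprintf(stderr, "xmalloc: %s\n", error_str(error));
        error_die(error);
    }

    return ptr;
}

int main(void)
{
    char *buffer;

    buffer = xmalloc(1024);
    free(buffer);
    return 0;
}
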
diff --git a/hash.h b/hash.h
new file mode 100644
index 0000000..ca60730
--- /dev/null
+++ b/hash.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Hash functions for integers and strings.
+ *
+ * Integer hashing follows Thomas Wang's paper about his 32/64-bits mix
+ * functions :
+ * - http://www.concentric.net/~Ttwang/tech/inthash.htm
+ *
+ * String hashing uses a variant of the djb2 algorithm with k=31, as in
+ * the implementation of the hashCode() method of the Java String class :
+ * - http://www.javamex.com/tutorials/collections/hash_function_technical.shtml
+ *
+ * Note that this algorithm isn't suitable for producing usable 64-bit hashes
+ * and is expected to only serve as an array index producer.
+ *
+ * These functions all have a bits parameter that indicates the number of
+ * relevant bits the caller is interested in. When returning a hash, its
+ * value must be truncated so that it can fit in the requested bit size.
+ * It can be used by the implementation to select high or low bits, depending
+ * on their relative randomness. To get complete, unmasked hashes, use the
+ * HASH_ALLBITS macro.
+ */
+
+#ifndef _HASH_H
+#define _HASH_H
+
+#include <stdint.h>
+
+#ifdef __LP64__
+#define HASH_ALLBITS 64
+#define hash_long(n, bits) hash_int64(n, bits)
+#else /* __LP64__ */
+#define HASH_ALLBITS 32
+#define hash_long(n, bits) hash_int32(n, bits)
+#endif
+
+static inline uint32_t hash_int32(uint32_t n, unsigned int bits)
+{
+ uint32_t hash;
+
+ hash = n;
+ hash = ~hash + (hash << 15);
+ hash ^= (hash >> 12);
+ hash += (hash << 2);
+ hash ^= (hash >> 4);
+ hash += (hash << 3) + (hash << 11);
+ hash ^= (hash >> 16);
+
+ return hash >> (32 - bits);
+}
+
+static inline uint64_t hash_int64(uint64_t n, unsigned int bits)
+{
+ uint64_t hash;
+
+ hash = n;
+ hash = ~hash + (hash << 21);
+ hash ^= (hash >> 24);
+ hash += (hash << 3) + (hash << 8);
+ hash ^= (hash >> 14);
+ hash += (hash << 2) + (hash << 4);
+ hash ^= (hash >> 28);
+ hash += (hash << 31);
+
+ return hash >> (64 - bits);
+}
+
+static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+{
+ return hash_long((unsigned long)ptr, bits);
+}
+
+static inline unsigned long hash_str(const char *str, unsigned int bits)
+{
+ unsigned long hash;
+ char c;
+
+ for (hash = 0; (c = *str) != '\0'; str++)
+ hash = ((hash << 5) - hash) + c;
+
+ return hash & ((1 << bits) - 1);
+}
+
+#endif /* _HASH_H */
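
For illustration only (not part of the diff above): the bits parameter selects the table size, so a hash can be used directly as an array index. Below is a hypothetical chained hash table with 2^6 buckets indexed by hash_str(); it also uses list.h from this same commit.

#include <stdio.h>
#include <string.h>

#include "hash.h"
#include "list.h"

#define HTABLE_BITS 6
#define HTABLE_SIZE (1UL << HTABLE_BITS)

/*
 * Hypothetical entry type: a name stored in a chained hash table.
 */
struct htable_entry {
    struct list node;
    const char *name;
};

static struct list htable[HTABLE_SIZE];

static void htable_init(void)
{
    unsigned long i;

    for (i = 0; i < HTABLE_SIZE; i++)
        list_init(&htable[i]);
}

static void htable_insert(struct htable_entry *entry)
{
    list_insert_tail(&htable[hash_str(entry->name, HTABLE_BITS)], &entry->node);
}

static struct htable_entry * htable_lookup(const char *name)
{
    struct list *bucket;
    struct htable_entry *entry;

    bucket = &htable[hash_str(name, HTABLE_BITS)];

    list_for_each_entry(bucket, entry, node)
        if (strcmp(entry->name, name) == 0)
            return entry;

    return NULL;
}

int main(void)
{
    struct htable_entry e1 = { .name = "avltree" };
    struct htable_entry e2 = { .name = "rdatree" };

    htable_init();
    htable_insert(&e1);
    htable_insert(&e2);
    printf("%s\n", (htable_lookup("rdatree") != NULL) ? "found" : "missing");
    return 0;
}
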
diff --git a/list.c b/list.c
new file mode 100644
index 0000000..6f978bc
--- /dev/null
+++ b/list.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * List sorting implementation.
+ *
+ * In addition to the most common list operations, this implementation includes
+ * a sort operation. The algorithm used is a variant of the bottom-up
+ * mergesort algorithm. It has the following properties :
+ * - It is iterative (no recursion overhead).
+ * - It is stable (the relative order of equal entries is preserved).
+ * - It only requires constant additional space (as it works on linked lists).
+ * - It performs at O(n log n) for average and worst cases.
+ * - It is adaptive, performing faster on already sorted lists.
+ */
+
+#include "list.h"
+
+/*
+ * Split input_list into two lists.
+ *
+ * The list_size first entries of input_list are moved into left_list while
+ * the "right" list is actually the list_size first entries of input_list
+ * after completion.
+ */
+static void _list_divide(struct list *input_list, struct list *left_list,
+ unsigned int list_size)
+{
+ struct list *node;
+ unsigned int i;
+
+ node = input_list->next;
+
+ for (i = 0; (i < list_size) && !list_end(input_list, node); i++)
+ node = node->next;
+
+ list_split(left_list, input_list, node);
+}
+
+/*
+ * Merge left_list and right_list at the tail of output_list.
+ */
+static void _list_merge(struct list *left_list, struct list *right_list,
+ struct list *output_list, unsigned int right_list_size,
+ list_sort_cmp_fn_t cmp_fn)
+{
+ struct list *left, *right;
+
+ left = left_list->prev;
+ right = right_list->next;
+
+ /*
+ * Try to concatenate lists instead of merging first. This reduces
+ * complexity for already sorted lists.
+ */
+ if (!list_end(left_list, left)
+ && ((right_list_size > 0) && !list_end(right_list, right))
+ && (cmp_fn(left, right) <= 0)) {
+ struct list tmp_list;
+
+ list_concat(output_list, left_list);
+ list_init(left_list);
+
+ while ((right_list_size > 0) && !list_end(right_list, right)) {
+ right = right->next;
+ right_list_size--;
+ }
+
+ list_split(&tmp_list, right_list, right);
+ list_concat(output_list, &tmp_list);
+ return;
+ }
+
+ left = left_list->next;
+
+ while (!list_end(left_list, left)
+ || ((right_list_size > 0) && !list_end(right_list, right)))
+ if (((right_list_size == 0) || list_end(right_list, right))
+ || (!list_end(left_list, left) && (cmp_fn(left, right) <= 0))) {
+ list_remove(left);
+ list_insert_tail(output_list, left);
+ left = left_list->next;
+ } else {
+ list_remove(right);
+ list_insert_tail(output_list, right);
+ right = right_list->next;
+ right_list_size--;
+ }
+}
+
+void list_sort(struct list *list, list_sort_cmp_fn_t cmp_fn)
+{
+ struct list left_list, output_list;
+ unsigned int list_size, nr_merges;
+
+ list_init(&left_list);
+ list_init(&output_list);
+
+ for (list_size = 1; /* no condition */; list_size <<= 1) {
+ for (nr_merges = 0; /* no condition */; nr_merges++) {
+ if (list_empty(list))
+ break;
+
+ _list_divide(list, &left_list, list_size);
+ _list_merge(&left_list, list, &output_list, list_size, cmp_fn);
+ }
+
+ list_concat(list, &output_list);
+ list_init(&output_list);
+
+ if (nr_merges <= 1)
+ return;
+ }
+}
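
For illustration only (not part of the diff above): a sketch of list_sort() with a hypothetical entry type. The comparison callback receives list nodes and uses list_entry() to reach the containing structures; entries for which the callback returns a value <= 0 are kept first, which is what makes the sort stable.

#include <stdio.h>

#include "list.h"

/*
 * Hypothetical sortable entry.
 */
struct item {
    struct list node;
    int value;
};

/*
 * list_sort() comparison callback: three-way comparison on the values.
 */
static int item_cmp(struct list *a, struct list *b)
{
    return list_entry(a, struct item, node)->value
           - list_entry(b, struct item, node)->value;
}

int main(void)
{
    struct item items[] = { { .value = 3 }, { .value = 1 }, { .value = 2 } };
    struct list item_list;
    struct item *item;
    unsigned int i;

    list_init(&item_list);

    for (i = 0; i < 3; i++)
        list_insert_tail(&item_list, &items[i].node);

    list_sort(&item_list, item_cmp);

    list_for_each_entry(&item_list, item, node)
        printf("%d\n", item->value);    /* prints 1, 2, 3 */

    return 0;
}
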
diff --git a/list.h b/list.h
new file mode 100644
index 0000000..c8f4c7e
--- /dev/null
+++ b/list.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Simple doubly-linked list.
+ */
+
+#ifndef _LIST_H
+#define _LIST_H
+
+#include <stddef.h>
+
+#include "macros.h"
+
+/*
+ * Structure used as both head and node.
+ *
+ * This implementation relies on using the same type for both heads and nodes.
+ *
+ * It is recommended to encode the use of struct list variables in their names,
+ * e.g. struct list free_list or struct list free_objects is a good hint for a
+ * list of free objects. A declaration like struct list free_node clearly
+ * indicates it is used as part of a node in the free list.
+ */
+struct list {
+ struct list *prev;
+ struct list *next;
+};
+
+/*
+ * Function type for list sorting.
+ *
+ * Use list_entry() to obtain the entries from the given nodes.
+ *
+ * See list_sort().
+ */
+typedef int (*list_sort_cmp_fn_t)(struct list *node_a, struct list *node_b);
+
+/*
+ * Static list initializer.
+ */
+#define LIST_INITIALIZER(list) { &(list), &(list) }
+
+/*
+ * Initialize a list.
+ */
+static inline void list_init(struct list *list)
+{
+ list->prev = list;
+ list->next = list;
+}
+
+/*
+ * Initialize a list node.
+ *
+ * An entry is in no list when its node members point to NULL.
+ */
+static inline void list_node_init(struct list *node)
+{
+ node->prev = NULL;
+ node->next = NULL;
+}
+
+/*
+ * Return true if node is in no list.
+ */
+static inline int list_node_unlinked(const struct list *node)
+{
+ return node->prev == NULL;
+}
+
+/*
+ * Macro that evaluates to the address of the structure containing the
+ * given node based on the given type and member.
+ */
+#define list_entry(node, type, member) structof(node, type, member)
+
+/*
+ * Return the first node of a list.
+ */
+static inline struct list * list_first(const struct list *list)
+{
+ return list->next;
+}
+
+/*
+ * Return the last node of a list.
+ */
+static inline struct list * list_last(const struct list *list)
+{
+ return list->prev;
+}
+
+/*
+ * Return the node next to the given node.
+ */
+static inline struct list * list_next(const struct list *node)
+{
+ return node->next;
+}
+
+/*
+ * Return the node previous to the given node.
+ */
+static inline struct list * list_prev(const struct list *node)
+{
+ return node->prev;
+}
+
+/*
+ * Get the first entry of a list.
+ */
+#define list_first_entry(list, type, member) \
+ list_entry(list_first(list), type, member)
+
+/*
+ * Get the last entry of a list.
+ */
+#define list_last_entry(list, type, member) \
+ list_entry(list_last(list), type, member)
+
+/*
+ * Return true if node is after the last or before the first node of the list.
+ */
+static inline int list_end(const struct list *list, const struct list *node)
+{
+ return list == node;
+}
+
+/*
+ * Return true if list is empty.
+ */
+static inline int list_empty(const struct list *list)
+{
+ return list == list->next;
+}
+
+/*
+ * Return true if list contains exactly one node.
+ */
+static inline int list_singular(const struct list *list)
+{
+ return (list != list->next) && (list->next == list->prev);
+}
+
+/*
+ * Split list2 by moving its nodes up to (but not including) the given
+ * node into list1 (which can be in a stale state).
+ *
+ * If list2 is empty, or node is list2 or list2->next, nothing is done.
+ */
+static inline void list_split(struct list *list1, struct list *list2,
+ struct list *node)
+{
+ if (list_empty(list2) || (list2->next == node) || list_end(list2, node))
+ return;
+
+ list1->next = list2->next;
+ list1->next->prev = list1;
+
+ list1->prev = node->prev;
+ node->prev->next = list1;
+
+ list2->next = node;
+ node->prev = list2;
+}
+
+/*
+ * Append the nodes of list2 at the end of list1.
+ *
+ * After completion, list2 is stale.
+ */
+static inline void list_concat(struct list *list1, const struct list *list2)
+{
+ struct list *last1, *first2, *last2;
+
+ if (list_empty(list2))
+ return;
+
+ last1 = list1->prev;
+ first2 = list2->next;
+ last2 = list2->prev;
+
+ last1->next = first2;
+ first2->prev = last1;
+
+ last2->next = list1;
+ list1->prev = last2;
+}
+
+/*
+ * Set the new head of a list.
+ *
+ * This function is an optimized version of :
+ * list_init(&new_list);
+ * list_concat(&new_list, &old_list);
+ *
+ * After completion, old_head is stale.
+ */
+static inline void list_set_head(struct list *new_head,
+ const struct list *old_head)
+{
+ if (list_empty(old_head)) {
+ list_init(new_head);
+ return;
+ }
+
+ *new_head = *old_head;
+ new_head->next->prev = new_head;
+ new_head->prev->next = new_head;
+}
+
+/*
+ * Add a node between two nodes.
+ */
+static inline void list_add(struct list *prev, struct list *next,
+ struct list *node)
+{
+ next->prev = node;
+ node->next = next;
+
+ prev->next = node;
+ node->prev = prev;
+}
+
+/*
+ * Insert a node at the head of a list.
+ */
+static inline void list_insert(struct list *list, struct list *node)
+{
+ list_add(list, list->next, node);
+}
+
+/*
+ * Insert a node at the tail of a list.
+ */
+static inline void list_insert_tail(struct list *list, struct list *node)
+{
+ list_add(list->prev, list, node);
+}
+
+/*
+ * Insert a node before another node.
+ */
+static inline void list_insert_before(struct list *next, struct list *node)
+{
+ list_add(next->prev, next, node);
+}
+
+/*
+ * Insert a node after another node.
+ */
+static inline void list_insert_after(struct list *prev, struct list *node)
+{
+ list_add(prev, prev->next, node);
+}
+
+/*
+ * Remove a node from a list.
+ *
+ * After completion, the node is stale.
+ */
+static inline void list_remove(struct list *node)
+{
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+}
+
+/*
+ * Forge a loop to process all nodes of a list.
+ *
+ * The node must not be altered during the loop.
+ */
+#define list_for_each(list, node) \
+for (node = list_first(list); \
+ !list_end(list, node); \
+ node = list_next(node))
+
+/*
+ * Forge a loop to process all nodes of a list.
+ *
+ * Unlike list_for_each(), the current node may safely be removed while
+ * iterating.
+ */
+#define list_for_each_safe(list, node, tmp) \
+for (node = list_first(list), tmp = list_next(node); \
+ !list_end(list, node); \
+ node = tmp, tmp = list_next(node))
+
+/*
+ * Version of list_for_each() that processes nodes backward.
+ */
+#define list_for_each_reverse(list, node) \
+for (node = list_last(list); \
+ !list_end(list, node); \
+ node = list_prev(node))
+
+/*
+ * Version of list_for_each_safe() that processes nodes backward.
+ */
+#define list_for_each_reverse_safe(list, node, tmp) \
+for (node = list_last(list), tmp = list_prev(node); \
+ !list_end(list, node); \
+ node = tmp, tmp = list_prev(node))
+
+/*
+ * Forge a loop to process all entries of a list.
+ *
+ * The entry node must not be altered during the loop.
+ */
+#define list_for_each_entry(list, entry, member) \
+for (entry = list_entry(list_first(list), typeof(*entry), member); \
+ !list_end(list, &entry->member); \
+ entry = list_entry(list_next(&entry->member), typeof(*entry), \
+ member))
+
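+/*
+ * Usage sketch for list_for_each_entry() (illustrative; the "struct foo"
+ * type and "foo_list" head are hypothetical):
+ *
+ *     struct foo {
+ *         struct list node;
+ *         int value;
+ *     };
+ *
+ *     struct foo *entry;
+ *
+ *     list_for_each_entry(&foo_list, entry, node)
+ *         printf("%d\n", entry->value);
+ */
+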
+/*
+ * Forge a loop to process all entries of a list.
+ *
+ * Unlike list_for_each_entry(), the current entry may safely be removed
+ * while iterating.
+ */
+#define list_for_each_entry_safe(list, entry, tmp, member) \
+for (entry = list_entry(list_first(list), typeof(*entry), member), \
+ tmp = list_entry(list_next(&entry->member), typeof(*entry), \
+ member); \
+ !list_end(list, &entry->member); \
+ entry = tmp, tmp = list_entry(list_next(&entry->member), \
+ typeof(*entry), member))
+
+/*
+ * Version of list_for_each_entry() that processes entries backward.
+ */
+#define list_for_each_entry_reverse(list, entry, member) \
+for (entry = list_entry(list_last(list), typeof(*entry), member); \
+ !list_end(list, &entry->member); \
+ entry = list_entry(list_prev(&entry->member), typeof(*entry), \
+ member))
+
+/*
+ * Version of list_for_each_entry_safe() that processes entries backward.
+ */
+#define list_for_each_entry_reverse_safe(list, entry, tmp, member) \
+for (entry = list_entry(list_last(list), typeof(*entry), member), \
+ tmp = list_entry(list_prev(&entry->member), typeof(*entry), \
+ member); \
+ !list_end(list, &entry->member); \
+ entry = tmp, tmp = list_entry(list_prev(&entry->member), \
+ typeof(*entry), member))
+
+/*
+ * Sort a list.
+ */
+void list_sort(struct list *list, list_sort_cmp_fn_t cmp_fn);
+
+#endif /* _LIST_H */
diff --git a/macros.h b/macros.h
new file mode 100644
index 0000000..9a55e06
--- /dev/null
+++ b/macros.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Helper macros.
+ */
+
+#ifndef _MACROS_H
+#define _MACROS_H
+
+#include <stddef.h>
+
+#define MACRO_BEGIN ({
+#define MACRO_END })
+
+#define XQUOTE(x) #x
+#define QUOTE(x) XQUOTE(x)
+
+#define STRLEN(x) (sizeof(x) - 1)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
+#define ISP2(x) P2ALIGNED(x, x)
+#define P2ALIGN(x, a) ((x) & -(a))
+#define P2ROUND(x, a) (-(-(x) & -(a)))
+#define P2END(x, a) (-(~(x) & -(a)))
+
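+/*
+ * For example (with power-of-2 alignment a == 8): P2ALIGN(13, 8) == 8,
+ * P2ROUND(13, 8) == 16 and P2END(16, 8) == 24, i.e. P2END() returns the
+ * first boundary strictly greater than its argument.
+ */
+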
+#define structof(ptr, type, member) \
+ ((type *)((char *)ptr - offsetof(type, member)))
+
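+/*
+ * Example (illustrative): given a hypothetical
+ *
+ *     struct foo {
+ *         int a;
+ *         struct list node;
+ *     };
+ *
+ * and a pointer p to the node member of a struct foo, structof(p, struct foo,
+ * node) yields the address of the enclosing structure.
+ */
+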
+#define alignof(x) __alignof__(x)
+
+#define likely(expr) __builtin_expect(!!(expr), 1)
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+
+#define barrier() asm volatile("" : : : "memory")
+
+#define __noreturn __attribute__((noreturn))
+#define __aligned(x) __attribute__((aligned(x)))
+
+#define __format_printf(fmt, args) \
+ __attribute__((format(printf, fmt, args)))
+
+#endif /* _MACROS_H */
diff --git a/mem.c b/mem.c
new file mode 100644
index 0000000..fa255e6
--- /dev/null
+++ b/mem.c
@@ -0,0 +1,1785 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching and general purpose memory allocator.
+ *
+ * This allocator is based on the following works :
+ * - "The Slab Allocator: An Object-Caching Kernel Memory Allocator",
+ * by Jeff Bonwick.
+ *
+ * It allows the allocation of objects (i.e. fixed-size typed buffers) from
+ * caches and is efficient in both space and time. This implementation follows
+ * many of the indications from the paper mentioned. The most notable
+ * differences are outlined below.
+ *
+ * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
+ * described in 3.2.3 "Slab Layout for Large Objects", has been replaced by
+ * an AVL tree storing slabs, sorted by address. The use of a self-balancing
+ * tree for buffer-to-slab conversions provides a few advantages over a hash
+ * table. Unlike a hash table, a BST provides a "lookup nearest" operation,
+ * so obtaining the slab data (whether it is embedded in the slab or off
+ * slab) from a buffer address simply consists of a "lookup nearest towards
+ * 0" tree search. Storing slabs instead of buffers also considerably reduces
+ * the number of elements to retain. Finally, a self-balancing tree is a true
+ * self-scaling data structure, whereas a hash table requires periodic
+ * maintenance and complete resizing, which is expensive. The only drawback is
+ * that releasing a buffer to the slab layer takes logarithmic time instead of
+ * constant time. But as the data set size is kept reasonable (because slabs
+ * are stored instead of buffers) and because the CPU pool layer services most
+ * requests, avoiding many accesses to the slab layer, it is considered an
+ * acceptable tradeoff.
+ *
+ * This implementation uses per-cpu pools of objects, which service most
+ * allocation requests. These pools act as caches (but are named differently
+ * to avoid confusion with CPU caches) that reduce contention on multiprocessor
+ * systems. When a pool is empty and cannot provide an object, it is filled by
+ * transferring multiple objects from the slab layer. The symmetric case is
+ * handled likewise.
+ */
+
+#include <time.h>
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <assert.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+
+#include "cpu.h"
+#include "mem.h"
+#include "list.h"
+#include "error.h"
+#include "macros.h"
+#include "avltree.h"
+
+/*
+ * The system page size.
+ *
+ * This macro actually expands to a global variable that is set on
+ * initialization.
+ */
+#define PAGE_SIZE ((unsigned long)_pagesize)
+
+/*
+ * Minimum required alignment.
+ */
+#define MEM_ALIGN_MIN 8
+
+/*
+ * Minimum number of buffers per slab.
+ *
+ * This value is ignored when the slab size exceeds a threshold.
+ */
+#define MEM_MIN_BUFS_PER_SLAB 8
+
+/*
+ * Special slab size beyond which the minimum number of buffers per slab is
+ * ignored when computing the slab size of a cache.
+ */
+#define MEM_SLAB_SIZE_THRESHOLD (8 * PAGE_SIZE)
+
+/*
+ * Special buffer size under which slab data is unconditionally allocated
+ * from its associated slab.
+ */
+#define MEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8)
+
+/*
+ * Time (in seconds) between two garbage collection operations.
+ */
+#define MEM_GC_INTERVAL 15
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define MEM_CPU_POOL_TRANSFER_RATIO 2
+
+/*
+ * Shift for the first general cache size.
+ */
+#define MEM_CACHES_FIRST_SHIFT 5
+
+/*
+ * Number of caches backing general purpose allocations.
+ */
+#define MEM_NR_MEM_CACHES 13
+
+/*
+ * Per-processor cache of pre-constructed objects.
+ *
+ * The flags member is a read-only CPU-local copy of the parent cache flags.
+ */
+struct mem_cpu_pool {
+ pthread_mutex_t lock;
+ int flags;
+ int size;
+ int transfer_size;
+ int nr_objs;
+ void **array;
+} __aligned(CPU_L1_SIZE);
+
+/*
+ * When a cache is created, its CPU pool type is determined from the buffer
+ * size. For small buffer sizes, many objects can be cached in a CPU pool.
+ * Conversely, for large buffer sizes, caching many objects would incur too
+ * much overhead, so only a few objects are stored in a CPU pool.
+ */
+struct mem_cpu_pool_type {
+ size_t buf_size;
+ int array_size;
+ size_t array_align;
+ struct mem_cache *array_cache;
+};
+
+/*
+ * Buffer descriptor.
+ *
+ * For normal caches (i.e. without MEM_CF_VERIFY), bufctls are located at the
+ * end of (but inside) each buffer. If MEM_CF_VERIFY is set, bufctls are located
+ * after each buffer.
+ *
+ * When an object is allocated to a client, its bufctl isn't used. This memory
+ * is instead used for redzoning if cache debugging is in effect.
+ */
+union mem_bufctl {
+ union mem_bufctl *next;
+ unsigned long redzone;
+};
+
+/*
+ * Redzone guard word.
+ */
+#ifdef __LP64__
+#if _HOST_BIG_ENDIAN
+#define MEM_REDZONE_WORD 0xfeedfacefeedfaceUL
+#else /* _HOST_BIG_ENDIAN */
+#define MEM_REDZONE_WORD 0xcefaedfecefaedfeUL
+#endif /* _HOST_BIG_ENDIAN */
+#else /* __LP64__ */
+#if _HOST_BIG_ENDIAN
+#define MEM_REDZONE_WORD 0xfeedfaceUL
+#else /* _HOST_BIG_ENDIAN */
+#define MEM_REDZONE_WORD 0xcefaedfeUL
+#endif /* _HOST_BIG_ENDIAN */
+#endif /* __LP64__ */
+
+/*
+ * Redzone byte for padding.
+ */
+#define MEM_REDZONE_BYTE 0xbb
+
+/*
+ * Buffer tag.
+ *
+ * This structure is only used for MEM_CF_VERIFY caches. It is located after
+ * the bufctl and includes information about the state of the buffer it
+ * describes (allocated or not). It should be thought of as a debugging
+ * extension of the bufctl.
+ */
+struct mem_buftag {
+ unsigned long state;
+};
+
+/*
+ * Values the buftag state member can take.
+ */
+#ifdef __LP64__
+#if _HOST_BIG_ENDIAN
+#define MEM_BUFTAG_ALLOC 0xa110c8eda110c8edUL
+#define MEM_BUFTAG_FREE 0xf4eeb10cf4eeb10cUL
+#else /* _HOST_BIG_ENDIAN */
+#define MEM_BUFTAG_ALLOC 0xedc810a1edc810a1UL
+#define MEM_BUFTAG_FREE 0x0cb1eef40cb1eef4UL
+#endif /* _HOST_BIG_ENDIAN */
+#else /* __LP64__ */
+#if _HOST_BIG_ENDIAN
+#define MEM_BUFTAG_ALLOC 0xa110c8edUL
+#define MEM_BUFTAG_FREE 0xf4eeb10cUL
+#else /* _HOST_BIG_ENDIAN */
+#define MEM_BUFTAG_ALLOC 0xedc810a1UL
+#define MEM_BUFTAG_FREE 0x0cb1eef4UL
+#endif /* _HOST_BIG_ENDIAN */
+#endif /* __LP64__ */
+
+/*
+ * Free and uninitialized patterns.
+ *
+ * These values are unconditionally 64-bit wide since buffers are at least
+ * 8-byte aligned.
+ */
+#if _HOST_BIG_ENDIAN
+#define MEM_FREE_PATTERN 0xdeadbeefdeadbeefULL
+#define MEM_UNINIT_PATTERN 0xbaddcafebaddcafeULL
+#else /* _HOST_BIG_ENDIAN */
+#define MEM_FREE_PATTERN 0xefbeaddeefbeaddeULL
+#define MEM_UNINIT_PATTERN 0xfecaddbafecaddbaULL
+#endif /* _HOST_BIG_ENDIAN */
+
+/*
+ * Page-aligned collection of unconstructed buffers.
+ */
+struct mem_slab {
+ struct list list_node;
+ struct avltree_node tree_node;
+ unsigned long nr_refs;
+ union mem_bufctl *first_free;
+ void *addr;
+};
+
+/*
+ * Private cache creation flags.
+ */
+#define MEM_CREATE_INTERNAL 0x0100 /* Prevent off slab data */
+
+/*
+ * Cache name buffer size.
+ */
+#define MEM_NAME_SIZE 32
+
+/*
+ * Cache flags.
+ *
+ * The flags don't change once set and can be tested without locking.
+ */
+#define MEM_CF_DIRECT 0x0001 /* No buf-to-slab tree lookup */
+#define MEM_CF_SLAB_EXTERNAL 0x0002 /* Slab data is off slab */
+
+/*
+ * Debugging flags
+ */
+#define MEM_CF_VERIFY 0x0100 /* Use debugging facilities */
+
+/*
+ * Cache of objects.
+ *
+ * Locking order : cpu_pool -> cache. CPU pool locking is ordered by CPU ID.
+ *
+ * The partial slabs list is sorted by slab references. Slabs with a high
+ * number of references are placed first on the list to reduce fragmentation.
+ * Sorting occurs at insertion/removal of buffers in a slab. As the list
+ * is maintained sorted, and the number of references only changes by one,
+ * this is a very cheap operation in the average case and the worst (linear)
+ * case is very unlikely.
+ */
+struct mem_cache {
+ /* CPU pool layer */
+ struct mem_cpu_pool cpu_pools[NR_CPUS];
+ struct mem_cpu_pool_type *cpu_pool_type;
+
+ /* Slab layer */
+ pthread_mutex_t lock;
+ struct list node; /* Cache list linkage */
+ struct list partial_slabs;
+ struct list free_slabs;
+ struct avltree active_slabs;
+ int flags;
+ size_t obj_size; /* User-provided size */
+ size_t align;
+ size_t buf_size; /* Aligned object size */
+ size_t bufctl_dist; /* Distance from buffer to bufctl */
+ size_t slab_size;
+ size_t color;
+ size_t color_max;
+ unsigned long bufs_per_slab;
+ unsigned long nr_objs; /* Number of allocated objects */
+ unsigned long nr_bufs; /* Total number of buffers */
+ unsigned long nr_slabs;
+ unsigned long nr_free_slabs;
+ mem_cache_ctor_t ctor;
+ struct mem_source source;
+ char name[MEM_NAME_SIZE];
+ size_t buftag_dist; /* Distance from buffer to buftag */
+ size_t redzone_pad; /* Bytes from end of object to redzone word */
+};
+
+/*
+ * Options for mem_cache_alloc_verify().
+ */
+#define MEM_AV_NOCONSTRUCT 0
+#define MEM_AV_CONSTRUCT 1
+
+/*
+ * Error codes for mem_cache_error().
+ */
+#define MEM_ERR_INVALID 0 /* Invalid address being freed */
+#define MEM_ERR_DOUBLEFREE 1 /* Freeing already free address */
+#define MEM_ERR_BUFTAG 2 /* Invalid buftag content */
+#define MEM_ERR_MODIFIED 3 /* Buffer modified while free */
+#define MEM_ERR_REDZONE 4 /* Redzone violation */
+
+/*
+ * See PAGE_SIZE.
+ */
+static long _pagesize;
+
+/*
+ * Available CPU pool types.
+ *
+ * For each entry, the CPU pool size applies from the entry buf_size
+ * (excluded) up to (and including) the buf_size of the preceding entry.
+ *
+ * See struct cpu_pool_type for a description of the values.
+ */
+static struct mem_cpu_pool_type mem_cpu_pool_types[] = {
+ { 32768, 1, 0, NULL },
+ { 4096, 8, CPU_L1_SIZE, NULL },
+ { 256, 64, CPU_L1_SIZE, NULL },
+ { 0, 128, CPU_L1_SIZE, NULL }
+};
+
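+/*
+ * For example, a cache whose buffer size is 300 bytes falls in the
+ * (256, 4096] range and therefore uses CPU pools of up to 64 objects.
+ */
+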
+/*
+ * Caches where CPU pool arrays are allocated from.
+ */
+static struct mem_cache mem_cpu_array_caches[ARRAY_SIZE(mem_cpu_pool_types)];
+
+/*
+ * Cache for off slab data.
+ */
+static struct mem_cache mem_slab_cache;
+
+/*
+ * Cache for dynamically created caches.
+ */
+static struct mem_cache mem_cache_cache;
+
+/*
+ * General caches array.
+ */
+static struct mem_cache mem_caches[MEM_NR_MEM_CACHES];
+
+/*
+ * List of all caches managed by the allocator.
+ */
+static struct list mem_cache_list;
+static pthread_mutex_t mem_cache_list_lock;
+
+/*
+ * Default backend functions.
+ */
+static void * mem_default_alloc(size_t size);
+static void mem_default_free(void *ptr, size_t size);
+
+/*
+ * Default source of memory.
+ */
+static struct mem_source mem_default_source = {
+ mem_default_alloc,
+ mem_default_free
+};
+
+#define mem_error(format, ...) \
+ fprintf(stderr, "mem: error: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
+
+#define mem_warn(format, ...) \
+ fprintf(stderr, "mem: warning: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
+
+#define mem_print(format, ...) \
+ fprintf(stderr, format "\n", ## __VA_ARGS__)
+
+static void mem_cache_error(struct mem_cache *cache, void *buf, int error,
+ void *arg);
+static void * mem_cache_alloc_from_slab(struct mem_cache *cache);
+static void mem_cache_free_to_slab(struct mem_cache *cache, void *buf);
+
+#ifdef CONFIG_MEM_USE_PHYS
+#include "phys.h"
+
+static void * mem_default_alloc(size_t size)
+{
+ phys_pfn_t pfn;
+
+ pfn = phys_alloc(P2ROUND(size, PAGE_SIZE) / PAGE_SIZE);
+ return (void *)(pfn * PAGE_SIZE);
+}
+
+static void mem_default_free(void *ptr, size_t size)
+{
+ phys_pfn_t pfn;
+
+ pfn = (phys_pfn_t)ptr / PAGE_SIZE;
+ size = P2ROUND(size, PAGE_SIZE) / PAGE_SIZE;
+
+ phys_free(pfn, size);
+}
+#else /* CONFIG_MEM_USE_PHYS */
+static void * mem_default_alloc(size_t size)
+{
+ void *addr;
+
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (addr == MAP_FAILED)
+ return NULL;
+
+ return addr;
+}
+
+static void mem_default_free(void *ptr, size_t size)
+{
+ munmap(ptr, size);
+}
+#endif /* CONFIG_MEM_USE_PHYS */
+
+static void * mem_buf_verify_bytes(void *buf, void *pattern, size_t size)
+{
+ char *ptr, *pattern_ptr, *end;
+
+ end = buf + size;
+
+ for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++)
+ if (*ptr != *pattern_ptr)
+ return ptr;
+
+ return NULL;
+}
+
+static void * mem_buf_verify(void *buf, uint64_t pattern, size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++)
+ if (*ptr != pattern)
+ return mem_buf_verify_bytes(ptr, &pattern, sizeof(pattern));
+
+ return NULL;
+}
+
+static void mem_buf_fill(void *buf, uint64_t pattern, size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++)
+ *ptr = pattern;
+}
+
+static void * mem_buf_verify_fill(void *buf, uint64_t old, uint64_t new,
+ size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++) {
+ if (*ptr != old)
+ return mem_buf_verify_bytes(ptr, &old, sizeof(old));
+
+ *ptr = new;
+ }
+
+ return NULL;
+}
+
+static inline union mem_bufctl * mem_buf_to_bufctl(void *buf,
+ struct mem_cache *cache)
+{
+ return (union mem_bufctl *)(buf + cache->bufctl_dist);
+}
+
+static inline struct mem_buftag * mem_buf_to_buftag(void *buf,
+ struct mem_cache *cache)
+{
+ return (struct mem_buftag *)(buf + cache->buftag_dist);
+}
+
+static inline void * mem_bufctl_to_buf(union mem_bufctl *bufctl,
+ struct mem_cache *cache)
+{
+ return (void *)bufctl - cache->bufctl_dist;
+}
+
+static void mem_slab_create_verify(struct mem_slab *slab,
+ struct mem_cache *cache)
+{
+ struct mem_buftag *buftag;
+ size_t buf_size;
+ unsigned long buffers;
+ void *buf;
+
+ buf_size = cache->buf_size;
+ buf = slab->addr;
+ buftag = mem_buf_to_buftag(buf, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ mem_buf_fill(buf, MEM_FREE_PATTERN, cache->bufctl_dist);
+ buftag->state = MEM_BUFTAG_FREE;
+ buf += buf_size;
+ buftag = mem_buf_to_buftag(buf, cache);
+ }
+}
+
+/*
+ * Create an empty slab for a cache.
+ *
+ * The caller must drop all locks before calling this function.
+ */
+static struct mem_slab * mem_slab_create(struct mem_cache *cache, size_t color)
+{
+ struct mem_slab *slab;
+ union mem_bufctl *bufctl;
+ size_t buf_size;
+ unsigned long buffers;
+ void *slab_buf;
+
+ slab_buf = cache->source.alloc_fn(cache->slab_size);
+
+ if (slab_buf == NULL)
+ return NULL;
+
+ if (cache->flags & MEM_CF_SLAB_EXTERNAL) {
+ slab = mem_cache_alloc(&mem_slab_cache);
+
+ if (slab == NULL) {
+ cache->source.free_fn(slab_buf, cache->slab_size);
+ return NULL;
+ }
+ } else {
+ slab = (struct mem_slab *)(slab_buf + cache->slab_size) - 1;
+ }
+
+ list_node_init(&slab->list_node);
+ avltree_node_init(&slab->tree_node);
+ slab->nr_refs = 0;
+ slab->first_free = NULL;
+ slab->addr = slab_buf + color;
+
+ buf_size = cache->buf_size;
+ bufctl = mem_buf_to_bufctl(slab->addr, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ bufctl->next = slab->first_free;
+ slab->first_free = bufctl;
+ bufctl = (union mem_bufctl *)((void *)bufctl + buf_size);
+ }
+
+ if (cache->flags & MEM_CF_VERIFY)
+ mem_slab_create_verify(slab, cache);
+
+ return slab;
+}
+
+static void mem_slab_destroy_verify(struct mem_slab *slab,
+ struct mem_cache *cache)
+{
+ struct mem_buftag *buftag;
+ size_t buf_size;
+ unsigned long buffers;
+ void *buf, *addr;
+
+ buf_size = cache->buf_size;
+ buf = slab->addr;
+ buftag = mem_buf_to_buftag(buf, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ if (buftag->state != MEM_BUFTAG_FREE)
+ mem_cache_error(cache, buf, MEM_ERR_BUFTAG, buftag);
+
+ addr = mem_buf_verify(buf, MEM_FREE_PATTERN, cache->bufctl_dist);
+
+ if (addr != NULL)
+ mem_cache_error(cache, buf, MEM_ERR_MODIFIED, addr);
+
+ buf += buf_size;
+ buftag = mem_buf_to_buftag(buf, cache);
+ }
+}
+
+/*
+ * Destroy a slab.
+ *
+ * The caller must drop all locks before calling this function.
+ */
+static void mem_slab_destroy(struct mem_slab *slab, struct mem_cache *cache)
+{
+ void *slab_buf;
+
+ assert(slab->nr_refs == 0);
+ assert(slab->first_free != NULL);
+
+ if (cache->flags & MEM_CF_VERIFY)
+ mem_slab_destroy_verify(slab, cache);
+
+ slab_buf = (void *)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
+ cache->source.free_fn(slab_buf, cache->slab_size);
+
+ if (cache->flags & MEM_CF_SLAB_EXTERNAL)
+ mem_cache_free(&mem_slab_cache, slab);
+}
+
+static inline int mem_slab_use_tree(int flags)
+{
+ return !(flags & MEM_CF_DIRECT) || (flags & MEM_CF_VERIFY);
+}
+
+static inline int mem_slab_cmp_lookup(const void *addr,
+ const struct avltree_node *node)
+{
+ struct mem_slab *slab;
+
+ slab = avltree_entry(node, struct mem_slab, tree_node);
+
+ if (addr == slab->addr)
+ return 0;
+ else if (addr < slab->addr)
+ return -1;
+ else
+ return 1;
+}
+
+static inline int mem_slab_cmp_insert(const struct avltree_node *a,
+ const struct avltree_node *b)
+{
+ struct mem_slab *slab;
+
+ slab = avltree_entry(a, struct mem_slab, tree_node);
+ return mem_slab_cmp_lookup(slab->addr, b);
+}
+
+static void mem_cpu_pool_init(struct mem_cpu_pool *cpu_pool,
+ struct mem_cache *cache)
+{
+ pthread_mutex_init(&cpu_pool->lock, NULL);
+ cpu_pool->flags = cache->flags;
+ cpu_pool->size = 0;
+ cpu_pool->transfer_size = 0;
+ cpu_pool->nr_objs = 0;
+ cpu_pool->array = NULL;
+}
+
+/*
+ * Return a CPU pool.
+ *
+ * This function will generally return the pool matching the CPU running the
+ * calling thread. Because of context switches and thread migration, the
+ * caller might be running on another processor after this function returns.
+ * Although not optimal, this should rarely happen, and it doesn't affect the
+ * allocator operations in any other way, as CPU pools are always valid, and
+ * their access is serialized by a lock.
+ */
+static inline struct mem_cpu_pool * mem_cpu_pool_get(struct mem_cache *cache)
+{
+ return &cache->cpu_pools[cpu_id()];
+}
+
+static inline void mem_cpu_pool_build(struct mem_cpu_pool *cpu_pool,
+ struct mem_cache *cache, void **array)
+{
+ cpu_pool->size = cache->cpu_pool_type->array_size;
+ cpu_pool->transfer_size = (cpu_pool->size + MEM_CPU_POOL_TRANSFER_RATIO - 1)
+ / MEM_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->array = array;
+}
+
+static inline void * mem_cpu_pool_pop(struct mem_cpu_pool *cpu_pool)
+{
+ cpu_pool->nr_objs--;
+ return cpu_pool->array[cpu_pool->nr_objs];
+}
+
+static inline void mem_cpu_pool_push(struct mem_cpu_pool *cpu_pool, void *obj)
+{
+ cpu_pool->array[cpu_pool->nr_objs] = obj;
+ cpu_pool->nr_objs++;
+}
+
+static int mem_cpu_pool_fill(struct mem_cpu_pool *cpu_pool,
+ struct mem_cache *cache)
+{
+ void *obj;
+ int i;
+
+ pthread_mutex_lock(&cache->lock);
+
+ for (i = 0; i < cpu_pool->transfer_size; i++) {
+ obj = mem_cache_alloc_from_slab(cache);
+
+ if (obj == NULL)
+ break;
+
+ mem_cpu_pool_push(cpu_pool, obj);
+ }
+
+ pthread_mutex_unlock(&cache->lock);
+
+ return i;
+}
+
+static void mem_cpu_pool_drain(struct mem_cpu_pool *cpu_pool,
+ struct mem_cache *cache)
+{
+ void *obj;
+ int i;
+
+ pthread_mutex_lock(&cache->lock);
+
+ for (i = cpu_pool->transfer_size; i > 0; i--) {
+ obj = mem_cpu_pool_pop(cpu_pool);
+ mem_cache_free_to_slab(cache, obj);
+ }
+
+ pthread_mutex_unlock(&cache->lock);
+}
+
+static void mem_cache_error(struct mem_cache *cache, void *buf, int error,
+ void *arg)
+{
+ struct mem_buftag *buftag;
+
+ mem_error("cache: %s, buffer: %p", cache->name, buf);
+
+ switch(error) {
+ case MEM_ERR_INVALID:
+ mem_error("freeing invalid address");
+ break;
+ case MEM_ERR_DOUBLEFREE:
+ mem_error("attempting to free the same address twice");
+ break;
+ case MEM_ERR_BUFTAG:
+ mem_error("invalid buftag content");
+ buftag = arg;
+ mem_error("buftag state: %p", (void *)buftag->state);
+ break;
+ case MEM_ERR_MODIFIED:
+ mem_error("free buffer modified");
+ mem_error("fault address: %p, offset in buffer: %td", arg, arg - buf);
+ break;
+ case MEM_ERR_REDZONE:
+ mem_error("write beyond end of buffer");
+ mem_error("fault address: %p, offset in buffer: %td", arg, arg - buf);
+ break;
+ default:
+ mem_error("unknown error");
+ }
+
+ error_die(ERR_MEM_CACHE);
+
+ /*
+ * Never reached.
+ */
+}
+
+/*
+ * Compute an appropriate slab size for the given cache.
+ *
+ * Once the slab size is known, this function sets the related properties
+ * (buffers per slab and maximum color). It can also set the MEM_CF_DIRECT
+ * and/or MEM_CF_SLAB_EXTERNAL flags depending on the resulting layout.
+ */
+static void mem_cache_compute_sizes(struct mem_cache *cache, int flags)
+{
+ size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size;
+ size_t waste, waste_min;
+ int embed, optimal_embed;
+
+ buf_size = cache->buf_size;
+
+ if (buf_size < MEM_BUF_SIZE_THRESHOLD)
+ flags |= MEM_CREATE_INTERNAL;
+
+ i = 0;
+ waste_min = (size_t)-1;
+
+ do {
+ i++;
+ slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
+ free_slab_size = slab_size;
+
+ if (flags & MEM_CREATE_INTERNAL)
+ free_slab_size -= sizeof(struct mem_slab);
+
+ buffers = free_slab_size / buf_size;
+ waste = free_slab_size % buf_size;
+
+ if (buffers > i)
+ i = buffers;
+
+ if (flags & MEM_CREATE_INTERNAL)
+ embed = 1;
+ else if (sizeof(struct mem_slab) <= waste) {
+ embed = 1;
+ waste -= sizeof(struct mem_slab);
+ } else {
+ embed = 0;
+ }
+
+ if (waste <= waste_min) {
+ waste_min = waste;
+ optimal_size = slab_size;
+ optimal_embed = embed;
+ }
+ } while ((buffers < MEM_MIN_BUFS_PER_SLAB)
+ && (slab_size < MEM_SLAB_SIZE_THRESHOLD));
+
+ assert(!(flags & MEM_CREATE_INTERNAL) || optimal_embed);
+
+ cache->slab_size = optimal_size;
+ slab_size = cache->slab_size - (optimal_embed
+ ? sizeof(struct mem_slab)
+ : 0);
+ cache->bufs_per_slab = slab_size / buf_size;
+ cache->color_max = slab_size % buf_size;
+
+ if (cache->color_max >= PAGE_SIZE)
+ cache->color_max = PAGE_SIZE - 1;
+
+ if (optimal_embed) {
+ if (cache->slab_size == PAGE_SIZE)
+ cache->flags |= MEM_CF_DIRECT;
+ } else {
+ cache->flags |= MEM_CF_SLAB_EXTERNAL;
+ }
+}
+
+static void mem_cache_init(struct mem_cache *cache, const char *name,
+ size_t obj_size, size_t align, mem_cache_ctor_t ctor,
+ struct mem_source *source, int flags)
+{
+ struct mem_cpu_pool_type *cpu_pool_type;
+ size_t i, buf_size;
+
+#ifdef CONFIG_MEM_VERIFY
+ cache->flags = MEM_CF_VERIFY;
+#else
+ cache->flags = 0;
+#endif
+
+ if (flags & MEM_CACHE_VERIFY)
+ cache->flags |= MEM_CF_VERIFY;
+
+ if (align < MEM_ALIGN_MIN)
+ align = MEM_ALIGN_MIN;
+
+ assert(obj_size > 0);
+ assert(ISP2(align));
+ assert(align < PAGE_SIZE);
+
+ buf_size = P2ROUND(obj_size, align);
+
+ if (source == NULL)
+ source = &mem_default_source;
+
+ pthread_mutex_init(&cache->lock, NULL);
+ list_node_init(&cache->node);
+ list_init(&cache->partial_slabs);
+ list_init(&cache->free_slabs);
+ avltree_init(&cache->active_slabs);
+ cache->obj_size = obj_size;
+ cache->align = align;
+ cache->buf_size = buf_size;
+ cache->bufctl_dist = buf_size - sizeof(union mem_bufctl);
+ cache->color = 0;
+ cache->nr_objs = 0;
+ cache->nr_bufs = 0;
+ cache->nr_slabs = 0;
+ cache->nr_free_slabs = 0;
+ cache->ctor = ctor;
+ cache->source = *source;
+ strncpy(cache->name, name, MEM_NAME_SIZE);
+ cache->name[MEM_NAME_SIZE - 1] = '\0';
+ cache->buftag_dist = 0;
+ cache->redzone_pad = 0;
+
+ if (cache->flags & MEM_CF_VERIFY) {
+ cache->bufctl_dist = buf_size;
+ cache->buftag_dist = cache->bufctl_dist + sizeof(union mem_bufctl);
+ cache->redzone_pad = cache->bufctl_dist - cache->obj_size;
+ buf_size += sizeof(union mem_bufctl) + sizeof(struct mem_buftag);
+ buf_size = P2ROUND(buf_size, align);
+ cache->buf_size = buf_size;
+ }
+
+ mem_cache_compute_sizes(cache, flags);
+
+ for (cpu_pool_type = mem_cpu_pool_types;
+ buf_size <= cpu_pool_type->buf_size;
+ cpu_pool_type++);
+
+ cache->cpu_pool_type = cpu_pool_type;
+
+ for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++)
+ mem_cpu_pool_init(&cache->cpu_pools[i], cache);
+
+ pthread_mutex_lock(&mem_cache_list_lock);
+ list_insert_tail(&mem_cache_list, &cache->node);
+ pthread_mutex_unlock(&mem_cache_list_lock);
+}
+
+struct mem_cache * mem_cache_create(const char *name, size_t obj_size,
+ size_t align, mem_cache_ctor_t ctor,
+ struct mem_source *source, int flags)
+{
+ struct mem_cache *cache;
+
+ cache = mem_cache_alloc(&mem_cache_cache);
+
+ if (cache == NULL)
+ return NULL;
+
+ mem_cache_init(cache, name, obj_size, align, ctor, source, flags);
+
+ return cache;
+}
+
+static inline int mem_cache_empty(struct mem_cache *cache)
+{
+ return cache->nr_objs == cache->nr_bufs;
+}
+
+static int mem_cache_grow(struct mem_cache *cache)
+{
+ struct mem_slab *slab;
+ size_t color;
+ int empty;
+
+ pthread_mutex_lock(&cache->lock);
+
+ if (!mem_cache_empty(cache)) {
+ pthread_mutex_unlock(&cache->lock);
+ return 1;
+ }
+
+ color = cache->color;
+ cache->color += cache->align;
+
+ if (cache->color > cache->color_max)
+ cache->color = 0;
+
+ pthread_mutex_unlock(&cache->lock);
+
+ slab = mem_slab_create(cache, color);
+
+ pthread_mutex_lock(&cache->lock);
+
+ if (slab != NULL) {
+ list_insert_tail(&cache->free_slabs, &slab->list_node);
+ cache->nr_bufs += cache->bufs_per_slab;
+ cache->nr_slabs++;
+ cache->nr_free_slabs++;
+ }
+
+ /*
+ * Even if our slab creation failed, another thread might have succeeded
+ * in growing the cache.
+ */
+ empty = mem_cache_empty(cache);
+
+ pthread_mutex_unlock(&cache->lock);
+
+ return !empty;
+}
+
+static void mem_cache_reap(struct mem_cache *cache)
+{
+ struct mem_slab *slab;
+ struct list dead_slabs;
+
+ list_init(&dead_slabs);
+
+ pthread_mutex_lock(&cache->lock);
+
+ while (!list_empty(&cache->free_slabs)) {
+ slab = list_first_entry(&cache->free_slabs, struct mem_slab, list_node);
+ list_remove(&slab->list_node);
+ list_insert(&dead_slabs, &slab->list_node);
+ cache->nr_bufs -= cache->bufs_per_slab;
+ cache->nr_slabs--;
+ cache->nr_free_slabs--;
+ }
+
+ pthread_mutex_unlock(&cache->lock);
+
+ while (!list_empty(&dead_slabs)) {
+ slab = list_first_entry(&dead_slabs, struct mem_slab, list_node);
+ list_remove(&slab->list_node);
+ mem_slab_destroy(slab, cache);
+ }
+}
+
+void mem_cache_destroy(struct mem_cache *cache)
+{
+ struct mem_cpu_pool *cpu_pool;
+ void **ptr;
+ size_t i;
+
+ pthread_mutex_lock(&mem_cache_list_lock);
+ list_remove(&cache->node);
+ pthread_mutex_unlock(&mem_cache_list_lock);
+
+ for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) {
+ cpu_pool = &cache->cpu_pools[i];
+
+ pthread_mutex_lock(&cpu_pool->lock);
+
+ if (cpu_pool->array == NULL) {
+ pthread_mutex_unlock(&cpu_pool->lock);
+ continue;
+ }
+
+ pthread_mutex_lock(&cache->lock);
+
+ for (ptr = cpu_pool->array + cpu_pool->nr_objs - 1;
+ ptr >= cpu_pool->array;
+ ptr--)
+ mem_cache_free_to_slab(cache, *ptr);
+
+ pthread_mutex_unlock(&cache->lock);
+
+ ptr = cpu_pool->array;
+ cpu_pool->size = 0;
+ cpu_pool->nr_objs = 0;
+ cpu_pool->array = NULL;
+ pthread_mutex_unlock(&cpu_pool->lock);
+
+ mem_cache_free(cache->cpu_pool_type->array_cache, ptr);
+ }
+
+ mem_cache_reap(cache);
+
+#ifndef NDEBUG
+ if (cache->nr_objs != 0)
+ mem_warn("'%s' not empty", cache->name);
+ else {
+ assert(list_empty(&cache->partial_slabs));
+ assert(list_empty(&cache->free_slabs));
+ assert(avltree_empty(&cache->active_slabs));
+ assert(cache->nr_bufs == 0);
+ assert(cache->nr_slabs == 0);
+ }
+#endif /* NDEBUG */
+
+ pthread_mutex_destroy(&cache->lock);
+
+ for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++)
+ pthread_mutex_destroy(&cache->cpu_pools[i].lock);
+
+ mem_cache_free(&mem_cache_cache, cache);
+}
+
+/*
+ * Allocate a raw (unconstructed) buffer from the slab layer of a cache.
+ *
+ * The cache must be locked before calling this function.
+ */
+static void * mem_cache_alloc_from_slab(struct mem_cache *cache)
+{
+ struct mem_slab *slab;
+ union mem_bufctl *bufctl;
+
+ if (!list_empty(&cache->partial_slabs))
+ slab = list_first_entry(&cache->partial_slabs, struct mem_slab,
+ list_node);
+ else if (!list_empty(&cache->free_slabs))
+ slab = list_first_entry(&cache->free_slabs, struct mem_slab, list_node);
+ else
+ return NULL;
+
+ bufctl = slab->first_free;
+ assert(bufctl != NULL);
+ slab->first_free = bufctl->next;
+ slab->nr_refs++;
+ cache->nr_objs++;
+
+ /*
+ * The slab has become complete.
+ */
+ if (slab->nr_refs == cache->bufs_per_slab) {
+ list_remove(&slab->list_node);
+
+ if (slab->nr_refs == 1)
+ cache->nr_free_slabs--;
+ } else if (slab->nr_refs == 1) {
+ /*
+ * The slab has become partial.
+ */
+ list_remove(&slab->list_node);
+ list_insert_tail(&cache->partial_slabs, &slab->list_node);
+ cache->nr_free_slabs--;
+ } else if (!list_singular(&cache->partial_slabs)) {
+ struct list *node;
+ struct mem_slab *tmp;
+
+ /*
+         * The slab remains partial. If there is more than one partial slab,
+ * maintain the list sorted.
+ */
+
+ assert(slab->nr_refs > 1);
+
+ for (node = list_prev(&slab->list_node);
+ !list_end(&cache->partial_slabs, node);
+ node = list_prev(node)) {
+ tmp = list_entry(node, struct mem_slab, list_node);
+
+ if (tmp->nr_refs >= slab->nr_refs)
+ break;
+ }
+
+ /*
+ * If the direct neighbor was found, the list is already sorted.
+ * If no slab was found, the slab is inserted at the head of the list.
+ */
+ if (node != list_prev(&slab->list_node)) {
+ list_remove(&slab->list_node);
+ list_insert_after(node, &slab->list_node);
+ }
+ }
+
+ if ((slab->nr_refs == 1) && mem_slab_use_tree(cache->flags))
+ avltree_insert(&cache->active_slabs, &slab->tree_node,
+ mem_slab_cmp_insert);
+
+ return mem_bufctl_to_buf(bufctl, cache);
+}
+
+/*
+ * Release a buffer to the slab layer of a cache.
+ *
+ * The cache must be locked before calling this function.
+ */
+static void mem_cache_free_to_slab(struct mem_cache *cache, void *buf)
+{
+ struct mem_slab *slab;
+ union mem_bufctl *bufctl;
+
+ if (cache->flags & MEM_CF_DIRECT) {
+ assert(cache->slab_size == PAGE_SIZE);
+ slab = (struct mem_slab *)P2END((unsigned long)buf, cache->slab_size)
+ - 1;
+ } else {
+ struct avltree_node *node;
+
+ node = avltree_lookup_nearest(&cache->active_slabs, buf,
+ mem_slab_cmp_lookup, AVLTREE_LEFT);
+ assert(node != NULL);
+ slab = avltree_entry(node, struct mem_slab, tree_node);
+ assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
+ + cache->slab_size, PAGE_SIZE)));
+ }
+
+ assert(slab->nr_refs >= 1);
+ assert(slab->nr_refs <= cache->bufs_per_slab);
+ bufctl = mem_buf_to_bufctl(buf, cache);
+ bufctl->next = slab->first_free;
+ slab->first_free = bufctl;
+ slab->nr_refs--;
+ cache->nr_objs--;
+
+ /*
+ * The slab has become free.
+ */
+ if (slab->nr_refs == 0) {
+ if (mem_slab_use_tree(cache->flags))
+ avltree_remove(&cache->active_slabs, &slab->tree_node);
+
+ /*
+ * The slab was partial.
+ */
+ if (cache->bufs_per_slab > 1)
+ list_remove(&slab->list_node);
+
+ list_insert_tail(&cache->free_slabs, &slab->list_node);
+ cache->nr_free_slabs++;
+ } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
+ /*
+ * The slab has become partial.
+ */
+ list_insert(&cache->partial_slabs, &slab->list_node);
+ } else if (!list_singular(&cache->partial_slabs)) {
+ struct list *node;
+ struct mem_slab *tmp;
+
+ /*
+         * The slab remains partial. If there is more than one partial slab,
+ * maintain the list sorted.
+ */
+
+ assert(slab->nr_refs > 0);
+
+ for (node = list_next(&slab->list_node);
+ !list_end(&cache->partial_slabs, node);
+ node = list_next(node)) {
+ tmp = list_entry(node, struct mem_slab, list_node);
+
+ if (tmp->nr_refs <= slab->nr_refs)
+ break;
+ }
+
+ /*
+ * If the direct neighbor was found, the list is already sorted.
+ * If no slab was found, the slab is inserted at the tail of the list.
+ */
+ if (node != list_next(&slab->list_node)) {
+ list_remove(&slab->list_node);
+ list_insert_before(node, &slab->list_node);
+ }
+ }
+}
+
+static void mem_cache_alloc_verify(struct mem_cache *cache, void *buf,
+ int construct)
+{
+ struct mem_buftag *buftag;
+ union mem_bufctl *bufctl;
+ void *addr;
+
+ buftag = mem_buf_to_buftag(buf, cache);
+
+ if (buftag->state != MEM_BUFTAG_FREE)
+ mem_cache_error(cache, buf, MEM_ERR_BUFTAG, buftag);
+
+ addr = mem_buf_verify_fill(buf, MEM_FREE_PATTERN, MEM_UNINIT_PATTERN,
+ cache->bufctl_dist);
+
+ if (addr != NULL)
+ mem_cache_error(cache, buf, MEM_ERR_MODIFIED, addr);
+
+ addr = buf + cache->obj_size;
+ memset(addr, MEM_REDZONE_BYTE, cache->redzone_pad);
+
+ bufctl = mem_buf_to_bufctl(buf, cache);
+ bufctl->redzone = MEM_REDZONE_WORD;
+ buftag->state = MEM_BUFTAG_ALLOC;
+
+ if (construct && (cache->ctor != NULL))
+ cache->ctor(buf);
+}
+
+void * mem_cache_alloc(struct mem_cache *cache)
+{
+ struct mem_cpu_pool *cpu_pool;
+ int filled;
+ void *buf;
+
+ cpu_pool = mem_cpu_pool_get(cache);
+
+ pthread_mutex_lock(&cpu_pool->lock);
+
+fast_alloc_retry:
+ if (likely(cpu_pool->nr_objs > 0)) {
+ buf = mem_cpu_pool_pop(cpu_pool);
+ pthread_mutex_unlock(&cpu_pool->lock);
+
+ if (cpu_pool->flags & MEM_CF_VERIFY)
+ mem_cache_alloc_verify(cache, buf, MEM_AV_CONSTRUCT);
+
+ return buf;
+ }
+
+ if (cpu_pool->array != NULL) {
+ filled = mem_cpu_pool_fill(cpu_pool, cache);
+
+ if (!filled) {
+ pthread_mutex_unlock(&cpu_pool->lock);
+
+ filled = mem_cache_grow(cache);
+
+ if (!filled)
+ return NULL;
+
+ pthread_mutex_lock(&cpu_pool->lock);
+ }
+
+ goto fast_alloc_retry;
+ }
+
+ pthread_mutex_unlock(&cpu_pool->lock);
+
+slow_alloc_retry:
+ pthread_mutex_lock(&cache->lock);
+ buf = mem_cache_alloc_from_slab(cache);
+ pthread_mutex_unlock(&cache->lock);
+
+ if (buf == NULL) {
+ filled = mem_cache_grow(cache);
+
+ if (!filled)
+ return NULL;
+
+ goto slow_alloc_retry;
+ }
+
+ if (cache->flags & MEM_CF_VERIFY)
+ mem_cache_alloc_verify(cache, buf, MEM_AV_NOCONSTRUCT);
+
+ if (cache->ctor != NULL)
+ cache->ctor(buf);
+
+ return buf;
+}
+
+static void mem_cache_free_verify(struct mem_cache *cache, void *buf)
+{
+ struct avltree_node *node;
+ struct mem_buftag *buftag;
+ struct mem_slab *slab;
+ union mem_bufctl *bufctl;
+ unsigned char *redzone_byte;
+ unsigned long slabend;
+
+ pthread_mutex_lock(&cache->lock);
+ node = avltree_lookup_nearest(&cache->active_slabs, buf,
+ mem_slab_cmp_lookup, AVLTREE_LEFT);
+ pthread_mutex_unlock(&cache->lock);
+
+ if (node == NULL)
+ mem_cache_error(cache, buf, MEM_ERR_INVALID, NULL);
+
+ slab = avltree_entry(node, struct mem_slab, tree_node);
+ slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE);
+
+ if ((unsigned long)buf >= slabend)
+ mem_cache_error(cache, buf, MEM_ERR_INVALID, NULL);
+
+ if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size)
+ != 0)
+ mem_cache_error(cache, buf, MEM_ERR_INVALID, NULL);
+
+ /*
+ * As the buffer address is valid, accessing its buftag is safe.
+ */
+ buftag = mem_buf_to_buftag(buf, cache);
+
+ if (buftag->state != MEM_BUFTAG_ALLOC) {
+ if (buftag->state == MEM_BUFTAG_FREE)
+ mem_cache_error(cache, buf, MEM_ERR_DOUBLEFREE, NULL);
+ else
+ mem_cache_error(cache, buf, MEM_ERR_BUFTAG, buftag);
+ }
+
+ redzone_byte = buf + cache->obj_size;
+ bufctl = mem_buf_to_bufctl(buf, cache);
+
+ while (redzone_byte < (unsigned char *)bufctl) {
+ if (*redzone_byte != MEM_REDZONE_BYTE)
+ mem_cache_error(cache, buf, MEM_ERR_REDZONE, redzone_byte);
+
+ redzone_byte++;
+ }
+
+ if (bufctl->redzone != MEM_REDZONE_WORD) {
+ unsigned long word;
+
+ word = MEM_REDZONE_WORD;
+ redzone_byte = mem_buf_verify_bytes(&bufctl->redzone, &word,
+ sizeof(bufctl->redzone));
+ mem_cache_error(cache, buf, MEM_ERR_REDZONE, redzone_byte);
+ }
+
+ mem_buf_fill(buf, MEM_FREE_PATTERN, cache->bufctl_dist);
+ buftag->state = MEM_BUFTAG_FREE;
+}
+
+void mem_cache_free(struct mem_cache *cache, void *obj)
+{
+ struct mem_cpu_pool *cpu_pool;
+ void **array;
+
+ cpu_pool = mem_cpu_pool_get(cache);
+
+ if (cpu_pool->flags & MEM_CF_VERIFY)
+ mem_cache_free_verify(cache, obj);
+
+ pthread_mutex_lock(&cpu_pool->lock);
+
+fast_free_retry:
+ if (likely(cpu_pool->nr_objs < cpu_pool->size)) {
+ mem_cpu_pool_push(cpu_pool, obj);
+ pthread_mutex_unlock(&cpu_pool->lock);
+ return;
+ }
+
+ if (cpu_pool->array != NULL) {
+ mem_cpu_pool_drain(cpu_pool, cache);
+ goto fast_free_retry;
+ }
+
+ pthread_mutex_unlock(&cpu_pool->lock);
+
+ array = mem_cache_alloc(cache->cpu_pool_type->array_cache);
+
+ if (array != NULL) {
+ pthread_mutex_lock(&cpu_pool->lock);
+
+ /*
+ * Another thread may have built the CPU pool while the lock was
+ * dropped.
+ */
+ if (cpu_pool->array != NULL) {
+ pthread_mutex_unlock(&cpu_pool->lock);
+ mem_cache_free(cache->cpu_pool_type->array_cache, array);
+ goto fast_free_retry;
+ }
+
+ mem_cpu_pool_build(cpu_pool, cache, array);
+ goto fast_free_retry;
+ }
+
+ mem_cache_free_to_slab(cache, obj);
+}
+
+void mem_cache_info(struct mem_cache *cache)
+{
+ struct mem_cache *cache_stats;
+ char flags_str[64];
+
+ if (cache == NULL) {
+ pthread_mutex_lock(&mem_cache_list_lock);
+
+ list_for_each_entry(&mem_cache_list, cache, node)
+ mem_cache_info(cache);
+
+ pthread_mutex_unlock(&mem_cache_list_lock);
+
+ return;
+ }
+
+ cache_stats = mem_alloc(sizeof(*cache_stats));
+
+ if (cache_stats == NULL) {
+ mem_warn("unable to allocate memory for cache stats");
+ return;
+ }
+
+ pthread_mutex_lock(&cache->lock);
+ cache_stats->flags = cache->flags;
+ cache_stats->obj_size = cache->obj_size;
+ cache_stats->align = cache->align;
+ cache_stats->buf_size = cache->buf_size;
+ cache_stats->bufctl_dist = cache->bufctl_dist;
+ cache_stats->slab_size = cache->slab_size;
+ cache_stats->color_max = cache->color_max;
+ cache_stats->bufs_per_slab = cache->bufs_per_slab;
+ cache_stats->nr_objs = cache->nr_objs;
+ cache_stats->nr_bufs = cache->nr_bufs;
+ cache_stats->nr_slabs = cache->nr_slabs;
+ cache_stats->nr_free_slabs = cache->nr_free_slabs;
+ strcpy(cache_stats->name, cache->name);
+ cache_stats->buftag_dist = cache->buftag_dist;
+ cache_stats->redzone_pad = cache->redzone_pad;
+ cache_stats->cpu_pool_type = cache->cpu_pool_type;
+ pthread_mutex_unlock(&cache->lock);
+
+ snprintf(flags_str, sizeof(flags_str), "%s%s%s",
+ (cache_stats->flags & MEM_CF_DIRECT) ? " DIRECT" : "",
+ (cache_stats->flags & MEM_CF_SLAB_EXTERNAL) ? " SLAB_EXTERNAL" : "",
+ (cache_stats->flags & MEM_CF_VERIFY) ? " VERIFY" : "");
+
+ mem_print("name: %s", cache_stats->name);
+ mem_print("flags: 0x%x%s", cache_stats->flags, flags_str);
+ mem_print("obj_size: %zu", cache_stats->obj_size);
+ mem_print("align: %zu", cache_stats->align);
+ mem_print("buf_size: %zu", cache_stats->buf_size);
+ mem_print("bufctl_dist: %zu", cache_stats->bufctl_dist);
+ mem_print("slab_size: %zu", cache_stats->slab_size);
+ mem_print("color_max: %zu", cache_stats->color_max);
+ mem_print("bufs_per_slab: %lu", cache_stats->bufs_per_slab);
+ mem_print("nr_objs: %lu", cache_stats->nr_objs);
+ mem_print("nr_bufs: %lu", cache_stats->nr_bufs);
+ mem_print("nr_slabs: %lu", cache_stats->nr_slabs);
+ mem_print("nr_free_slabs: %lu", cache_stats->nr_free_slabs);
+ mem_print("buftag_dist: %zu", cache_stats->buftag_dist);
+ mem_print("redzone_pad: %zu", cache_stats->redzone_pad);
+ mem_print("cpu_pool_size: %d", cache_stats->cpu_pool_type->array_size);
+ mem_print("--");
+
+ mem_free(cache_stats, sizeof(*cache_stats));
+}
+
+static void * mem_gc(void *arg)
+{
+ struct mem_cache *cache;
+ struct timespec ts;
+ int error;
+
+ (void)arg;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+
+ for (;;) {
+ ts.tv_sec += MEM_GC_INTERVAL;
+
+ do
+ error = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &ts, NULL);
+ while (error == EINTR);
+
+ /*
+         * EINTR is the only expected error, and it is handled by the retry
+         * loop above.
+         */
+ assert(error == 0);
+
+#if 0
+ mem_info();
+
+#ifdef CONFIG_MEM_USE_PHYS
+ phys_info();
+#endif /* CONFIG_MEM_USE_PHYS */
+#endif
+
+ pthread_mutex_lock(&mem_cache_list_lock);
+
+ list_for_each_entry(&mem_cache_list, cache, node)
+ mem_cache_reap(cache);
+
+ pthread_mutex_unlock(&mem_cache_list_lock);
+ }
+
+ return NULL;
+}
+
+void mem_init(void)
+{
+ static int mem_initialized = 0;
+ struct mem_cpu_pool_type *cpu_pool_type;
+ char name[MEM_NAME_SIZE];
+ pthread_t thread;
+ size_t i, size;
+ int error;
+
+ if (mem_initialized)
+ return;
+
+ mem_initialized = 1;
+
+ _pagesize = sysconf(_SC_PAGESIZE);
+ assert(ISP2(_pagesize));
+
+ /*
+ * Make sure a bufctl can always be stored in a buffer.
+ */
+ assert(sizeof(union mem_bufctl) <= MEM_ALIGN_MIN);
+
+#ifdef CONFIG_MEM_USE_PHYS
+ phys_init();
+#endif /* CONFIG_MEM_USE_PHYS */
+
+ list_init(&mem_cache_list);
+ pthread_mutex_init(&mem_cache_list_lock, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(mem_cpu_pool_types); i++) {
+ cpu_pool_type = &mem_cpu_pool_types[i];
+ cpu_pool_type->array_cache = &mem_cpu_array_caches[i];
+ sprintf(name, "mem_cpu_array_%d", cpu_pool_type->array_size);
+ size = sizeof(void *) * cpu_pool_type->array_size;
+ mem_cache_init(cpu_pool_type->array_cache, name, size,
+ cpu_pool_type->array_align, NULL, NULL, 0);
+ }
+
+ /*
+ * Prevent off slab data for the slab cache to avoid infinite recursion.
+ */
+ mem_cache_init(&mem_slab_cache, "mem_slab", sizeof(struct mem_slab),
+ 0, NULL, NULL, MEM_CREATE_INTERNAL);
+ mem_cache_init(&mem_cache_cache, "mem_cache", sizeof(struct mem_cache),
+ CPU_L1_SIZE, NULL, NULL, 0);
+
+ size = 1 << MEM_CACHES_FIRST_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(mem_caches); i++) {
+ sprintf(name, "mem_%zu", size);
+ mem_cache_init(&mem_caches[i], name, size, 0, NULL, NULL, 0);
+ size <<= 1;
+ }
+
+ error = pthread_create(&thread, NULL, mem_gc, NULL);
+
+ if (error)
+ mem_error("unable to create garbage collection thread: %s",
+ strerror(error));
+}
+
+/*
+ * Return the mem cache index matching the given allocation size, which
+ * must be strictly greater than 0.
+ */
+static inline size_t mem_get_index(size_t size)
+{
+ assert(size != 0);
+
+ size = (size - 1) >> MEM_CACHES_FIRST_SHIFT;
+
+ if (size == 0)
+ return 0;
+ else
+ return (sizeof(long) * CHAR_BIT) - __builtin_clzl(size);
+}
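+
+/*
+ * For example, mem_get_index(32) is 0 (the "mem_32" cache) and
+ * mem_get_index(33) is 1 (the "mem_64" cache).
+ */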
+
+static void mem_alloc_verify(struct mem_cache *cache, void *buf, size_t size)
+{
+ size_t redzone_size;
+ void *redzone;
+
+ assert(size <= cache->obj_size);
+
+ redzone = buf + size;
+ redzone_size = cache->obj_size - size;
+ memset(redzone, MEM_REDZONE_BYTE, redzone_size);
+}
+
+void * mem_alloc(size_t size)
+{
+ size_t index;
+ void *buf;
+
+ if (size == 0)
+ return NULL;
+
+ index = mem_get_index(size);
+
+ if (index < ARRAY_SIZE(mem_caches)) {
+ struct mem_cache *cache;
+
+ cache = &mem_caches[index];
+ buf = mem_cache_alloc(cache);
+
+ if ((buf != NULL) && (cache->flags & MEM_CF_VERIFY))
+ mem_alloc_verify(cache, buf, size);
+ } else {
+ buf = mem_default_alloc(size);
+ }
+
+ return buf;
+}
+
+void * mem_zalloc(size_t size)
+{
+ void *ptr;
+
+ ptr = mem_alloc(size);
+
+ if (ptr == NULL)
+ return NULL;
+
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+static void mem_free_verify(struct mem_cache *cache, void *buf, size_t size)
+{
+ unsigned char *redzone_byte, *redzone_end;
+
+ assert(size <= cache->obj_size);
+
+ redzone_byte = buf + size;
+ redzone_end = buf + cache->obj_size;
+
+ while (redzone_byte < redzone_end) {
+ if (*redzone_byte != MEM_REDZONE_BYTE)
+ mem_cache_error(cache, buf, MEM_ERR_REDZONE, redzone_byte);
+
+ redzone_byte++;
+ }
+}
+
+void mem_free(void *ptr, size_t size)
+{
+ size_t index;
+
+ if ((ptr == NULL) || (size == 0))
+ return;
+
+ index = mem_get_index(size);
+
+ if (index < ARRAY_SIZE(mem_caches)) {
+ struct mem_cache *cache;
+
+ cache = &mem_caches[index];
+
+ if (cache->flags & MEM_CF_VERIFY)
+ mem_free_verify(cache, ptr, size);
+
+ mem_cache_free(cache, ptr);
+ } else {
+ mem_default_free(ptr, size);
+ }
+}
+
+void mem_info(void)
+{
+ struct mem_cache *cache, *cache_stats;
+ size_t mem_usage, mem_reclaimable;
+
+ cache_stats = mem_alloc(sizeof(*cache_stats));
+
+ if (cache_stats == NULL) {
+ mem_warn("unable to allocate memory for cache stats");
+ return;
+ }
+
+ mem_print("-- cache obj slab bufs objs bufs "
+ " total reclaimable");
+ mem_print("-- name size size /slab usage count "
+ " memory memory");
+
+ pthread_mutex_lock(&mem_cache_list_lock);
+
+ list_for_each_entry(&mem_cache_list, cache, node) {
+ pthread_mutex_lock(&cache->lock);
+ cache_stats->obj_size = cache->obj_size;
+ cache_stats->slab_size = cache->slab_size;
+ cache_stats->bufs_per_slab = cache->bufs_per_slab;
+ cache_stats->nr_objs = cache->nr_objs;
+ cache_stats->nr_bufs = cache->nr_bufs;
+ cache_stats->nr_slabs = cache->nr_slabs;
+ cache_stats->nr_free_slabs = cache->nr_free_slabs;
+ strcpy(cache_stats->name, cache->name);
+ pthread_mutex_unlock(&cache->lock);
+
+ mem_usage = (cache_stats->nr_slabs * cache_stats->slab_size) >> 10;
+ mem_reclaimable =
+ (cache_stats->nr_free_slabs * cache_stats->slab_size) >> 10;
+
+ mem_print("%-27s %6zu %3zuk %4lu %6lu %6lu %7zuk %10zuk",
+ cache_stats->name, cache_stats->obj_size,
+ cache_stats->slab_size >> 10, cache_stats->bufs_per_slab,
+ cache_stats->nr_objs, cache_stats->nr_bufs, mem_usage,
+ mem_reclaimable);
+ }
+
+ pthread_mutex_unlock(&mem_cache_list_lock);
+
+ mem_free(cache_stats, sizeof(*cache_stats));
+}
diff --git a/mem.h b/mem.h
new file mode 100644
index 0000000..9dd3045
--- /dev/null
+++ b/mem.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching and general purpose memory allocator.
+ */
+
+#ifndef _MEM_H
+#define _MEM_H
+
+#include <stddef.h>
+
+/*
+ * Backend source of memory for a cache.
+ */
+struct mem_source {
+ void * (*alloc_fn)(size_t);
+ void (*free_fn)(void *, size_t);
+};
+
+/*
+ * Object cache opaque declaration.
+ */
+struct mem_cache;
+
+/*
+ * Type for constructor functions.
+ *
+ * The pre-constructed state of an object is supposed to include only
+ * elements such as linked lists, locks and reference counters. Therefore
+ * constructors are expected to 1) never fail and 2) not need any
+ * user-provided data. The first constraint implies that object construction
+ * never performs dynamic resource allocation, which also means there is no
+ * need for destructors.
+ */
+typedef void (*mem_cache_ctor_t)(void *);
+
+/*
+ * Cache creation flags.
+ */
+#define MEM_CACHE_VERIFY 0x1 /* Use debugging facilities */
+
+/*
+ * Create a cache.
+ */
+struct mem_cache * mem_cache_create(const char *name, size_t obj_size,
+ size_t align, mem_cache_ctor_t ctor,
+ struct mem_source *source, int flags);
+
+/*
+ * Destroy a cache.
+ */
+void mem_cache_destroy(struct mem_cache *cache);
+
+/*
+ * Allocate an object from a cache.
+ */
+void * mem_cache_alloc(struct mem_cache *cache);
+
+/*
+ * Release an object to its cache.
+ */
+void mem_cache_free(struct mem_cache *cache, void *obj);
+
+/*
+ * Display internal cache stats on stderr.
+ *
+ * If cache is NULL, this function displays all managed caches.
+ */
+void mem_cache_info(struct mem_cache *cache);
+
+/*
+ * Initialize the memory allocator module.
+ */
+void mem_init(void);
+
+/*
+ * Allocate size bytes of uninitialized memory.
+ */
+void * mem_alloc(size_t size);
+
+/*
+ * Allocate size bytes of zeroed memory.
+ */
+void * mem_zalloc(size_t size);
+
+/*
+ * Release memory obtained with mem_alloc() or mem_zalloc().
+ *
+ * The size argument must strictly match the value given at allocation time.
+ */
+void mem_free(void *ptr, size_t size);
+
+/*
+ * Display global memory information on stderr.
+ */
+void mem_info(void);
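+
+/*
+ * Usage sketch (illustrative only; "struct obj" and obj_ctor() are
+ * hypothetical):
+ *
+ *     struct mem_cache *cache;
+ *     struct obj *obj;
+ *
+ *     mem_init();
+ *     cache = mem_cache_create("obj", sizeof(struct obj), 0, obj_ctor,
+ *                              NULL, 0);
+ *     obj = mem_cache_alloc(cache);
+ *     ...
+ *     mem_cache_free(cache, obj);
+ *     mem_cache_destroy(cache);
+ */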
+
+#endif /* _MEM_H */
diff --git a/mem_malloc.c b/mem_malloc.c
new file mode 100644
index 0000000..8df6814
--- /dev/null
+++ b/mem_malloc.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Libc-compatible malloc() functions implementation.
+ *
+ * Keep in mind this code is mainly used for tests. Also, initialization is
+ * not thread-safe.
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "mem.h"
+#include "macros.h"
+#include "mem_malloc.h"
+
+struct btag {
+ void *addr;
+ size_t size;
+} __aligned(8);
+
+void * malloc(size_t size)
+{
+ struct btag *btag;
+
+ mem_init();
+
+ size += sizeof(*btag);
+ btag = mem_alloc(size);
+
+ if (btag == NULL) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ btag->addr = btag;
+ btag->size = size;
+
+ return btag + 1;
+}
+
+void * calloc(size_t nmemb, size_t size)
+{
+ size_t bytes;
+ void *buf;
+
+ mem_init();
+
+ bytes = nmemb * size;
+ buf = malloc(bytes);
+
+ if (buf == NULL)
+ return NULL;
+
+ memset(buf, 0, bytes);
+
+ return buf;
+}
+
+void * realloc(void *ptr, size_t size)
+{
+ struct btag *btag;
+ size_t old_size;
+ char *buf;
+
+ mem_init();
+
+ if (ptr == NULL)
+ return malloc(size);
+ else if (size == 0) {
+ free(ptr);
+ return NULL;
+ }
+
+ buf = malloc(size);
+
+ if (buf == NULL)
+ return NULL;
+
+ btag = (struct btag *)ptr - 1;
+ old_size = btag->size - sizeof(*btag);
+ memcpy(buf, ptr, MIN(old_size, size));
+ mem_free(btag->addr, btag->size);
+
+ return buf;
+}
+
+int posix_memalign(void **ptr, size_t align, size_t size)
+{
+ struct btag *btag;
+ char *buf;
+
+ mem_init();
+
+ if (!ISP2(align))
+ return EINVAL;
+
+ size += sizeof(*btag);
+ size = P2ROUND(size, align * 2);
+ buf = mem_alloc(size);
+
+ if (buf == NULL)
+ return ENOMEM;
+
+ btag = (struct btag *)P2ROUND((unsigned long)buf + sizeof(*btag), align)
+ - 1;
+ btag->addr = buf;
+ btag->size = size;
+
+ *ptr = btag + 1;
+ return 0;
+}
+
+void free(void *ptr)
+{
+ struct btag *btag;
+
+ mem_init();
+
+ if (ptr == NULL)
+ return;
+
+ btag = (struct btag *)ptr - 1;
+ mem_free(btag->addr, btag->size);
+}
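
All of the functions above share the same boundary tag scheme: each allocation is padded so that a struct btag recording the real start address and size sits immediately before the pointer handed to the caller, and free() recovers it with (struct btag *)ptr - 1. A minimal, hypothetical exercise of this interface (editorial sketch, not part of the commit):

#include <assert.h>
#include <string.h>

#include "mem_malloc.h"

int main(void)
{
    char *buf;
    void *aligned;
    int error;

    /* malloc() places the tag just before the returned pointer. */
    buf = malloc(100);
    assert(buf != NULL);
    memset(buf, 0, 100);

    /* realloc() reads the old size back from the tag before copying. */
    buf = realloc(buf, 200);
    assert(buf != NULL);
    free(buf);

    /* posix_memalign() stores the unaligned start address in the tag so
     * that free() can release the whole block. */
    error = posix_memalign(&aligned, 64, 100);
    assert(error == 0);
    assert(((unsigned long)aligned % 64) == 0);
    free(aligned);

    return 0;
}
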
diff --git a/mem_malloc.h b/mem_malloc.h
new file mode 100644
index 0000000..6921c32
--- /dev/null
+++ b/mem_malloc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Libc-compatible malloc() functions.
+ *
+ * This code is messy, for testing purposes only, and disabled by default.
+ */
+
+#ifndef _MEM_MALLOC_H
+#define _MEM_MALLOC_H
+
+#include <stddef.h>
+
+void * malloc(size_t size);
+void * calloc(size_t nmemb, size_t size);
+void * realloc(void *ptr, size_t size);
+int posix_memalign(void **ptr, size_t align, size_t size);
+void free(void *ptr);
+
+#endif /* _MEM_MALLOC_H */
diff --git a/phys.c b/phys.c
new file mode 100644
index 0000000..5f36e14
--- /dev/null
+++ b/phys.c
@@ -0,0 +1,754 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Page allocator.
+ *
+ * This implementation uses the binary buddy system to manage its heap.
+ * Descriptions of the buddy system can be found in the following works:
+ * - "UNIX Internals: The New Frontiers", by Uresh Vahalia.
+ * - "Dynamic Storage Allocation: A Survey and Critical Review",
+ * by Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles.
+ *
+ * In addition, this allocator uses per-cpu pools of pages for level 0
+ * (i.e. single page) allocations. These pools act as caches (but are named
+ * differently to avoid confusion with CPU caches) that reduce contention on
+ * multiprocessor systems. When a pool is empty and cannot provide a page,
+ * it is filled by transferring multiple pages from the backend buddy system.
+ * Symmetrically, when a pool is full, multiple pages are transferred back
+ * to the buddy system at once.
+ */
+
+#include <sched.h>
+#include <stdio.h>
+#include <assert.h>
+#include <limits.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+
+#include "cpu.h"
+#include "list.h"
+#include "phys.h"
+#include "error.h"
+#include "macros.h"
+
+/*
+ * The system page size.
+ *
+ * This macro actually expands to a global variable that is set on
+ * initialization.
+ */
+#define PAGE_SIZE ((unsigned long)_pagesize)
+
+/*
+ * Maximum number of segments.
+ */
+#define PHYS_MAX_SEGMENTS 2
+
+/*
+ * Segment boundaries (in page frames).
+ */
+#define PHYS_ISADMA_LIMIT 0x1000
+#define PHYS_NORMAL_LIMIT 0x30000
+
+/*
+ * Number of segment lists.
+ */
+#define PHYS_NR_SEG_LISTS 2
+
+/*
+ * Segment list priorities.
+ *
+ * Higher priorities have lower numerical values.
+ */
+#define PHYS_SEGLIST_NORMAL 1
+#define PHYS_SEGLIST_ISADMA 0
+
+/*
+ * Number of free block lists per segment.
+ */
+#define PHYS_NR_FREE_LISTS 11
+
+/*
+ * The size of a CPU pool is computed by dividing the size of its containing
+ * segment by this value.
+ */
+#define PHYS_CPU_POOL_RATIO 1024
+
+/*
+ * Maximum number of pages in a CPU pool.
+ */
+#define PHYS_CPU_POOL_MAX_SIZE 128
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define PHYS_CPU_POOL_TRANSFER_RATIO 2
+
+/*
+ * Per-processor cache of pages.
+ */
+struct phys_cpu_pool {
+ pthread_mutex_t lock;
+ int size;
+ int transfer_size;
+ int nr_pages;
+ struct list pages;
+} __aligned(CPU_L1_SIZE);
+
+/*
+ * Special level value.
+ *
+ * When a page is free, its level is the index of its free list.
+ */
+#define PHYS_LEVEL_ALLOCATED PHYS_NR_FREE_LISTS
+
+/*
+ * Doubly-linked list of free blocks.
+ */
+struct phys_free_list {
+ unsigned long size;
+ struct list blocks;
+};
+
+/*
+ * Segment name buffer size.
+ */
+#define PHYS_NAME_SIZE 16
+
+/*
+ * Segment of contiguous memory.
+ */
+struct phys_seg {
+ struct phys_cpu_pool cpu_pools[NR_CPUS];
+
+ struct list node;
+ phys_pfn_t start;
+ phys_pfn_t end;
+ struct phys_page *pages;
+ struct phys_page *pages_end;
+ pthread_mutex_t lock;
+ struct phys_free_list free_lists[PHYS_NR_FREE_LISTS];
+ unsigned long nr_free_pages;
+ char name[PHYS_NAME_SIZE];
+};
+
+/*
+ * See PAGE_SIZE.
+ */
+static long _pagesize;
+
+/*
+ * Segment lists, ordered by priority (higher priority lists have lower
+ * numerical priorities).
+ */
+static struct list phys_seg_lists[PHYS_NR_SEG_LISTS];
+
+/*
+ * Segment table.
+ */
+static struct phys_seg phys_segs[PHYS_MAX_SEGMENTS];
+
+/*
+ * Number of loaded segments.
+ */
+static unsigned int phys_segs_size;
+
+static void phys_page_init(struct phys_page *page, struct phys_seg *seg,
+ phys_pfn_t pfn)
+{
+ page->seg = seg;
+ page->pfn = pfn;
+ page->level = PHYS_LEVEL_ALLOCATED;
+}
+
+static inline struct phys_page * phys_page_lookup(phys_pfn_t pfn)
+{
+ struct phys_seg *seg;
+ unsigned int i;
+
+ for (i = 0; i < phys_segs_size; i++) {
+ seg = &phys_segs[i];
+
+ if ((pfn >= seg->start) && (pfn < seg->end))
+ return &seg->pages[pfn - seg->start];
+ }
+
+ return NULL;
+}
+
+static void phys_free_list_init(struct phys_free_list *free_list)
+{
+ free_list->size = 0;
+ list_init(&free_list->blocks);
+}
+
+static inline void phys_free_list_insert(struct phys_free_list *free_list,
+ struct phys_page *page)
+{
+ assert(page->level == PHYS_LEVEL_ALLOCATED);
+
+ free_list->size++;
+ list_insert(&free_list->blocks, &page->node);
+}
+
+static inline void phys_free_list_remove(struct phys_free_list *free_list,
+ struct phys_page *page)
+{
+ assert(free_list->size != 0);
+ assert(!list_empty(&free_list->blocks));
+ assert(page->level < PHYS_NR_FREE_LISTS);
+
+ free_list->size--;
+ list_remove(&page->node);
+}
+
+static struct phys_page * phys_seg_alloc_from_buddy(struct phys_seg *seg,
+ unsigned int level)
+{
+ struct phys_free_list *free_list;
+ struct phys_page *page, *buddy;
+ unsigned int i;
+
+ assert(level < PHYS_NR_FREE_LISTS);
+
+ for (i = level; i < PHYS_NR_FREE_LISTS; i++) {
+ free_list = &seg->free_lists[i];
+
+ if (free_list->size != 0)
+ break;
+ }
+
+ if (i == PHYS_NR_FREE_LISTS)
+ return NULL;
+
+ page = list_first_entry(&free_list->blocks, struct phys_page, node);
+ phys_free_list_remove(free_list, page);
+ page->level = PHYS_LEVEL_ALLOCATED;
+
+ while (i > level) {
+ i--;
+ buddy = &page[1 << i];
+ phys_free_list_insert(&seg->free_lists[i], buddy);
+ buddy->level = i;
+ }
+
+ seg->nr_free_pages -= (1 << level);
+ return page;
+}
+
+static void phys_seg_free_to_buddy(struct phys_seg *seg,
+ struct phys_page *page,
+ unsigned int level)
+{
+ struct phys_page *buddy;
+ phys_pfn_t size, pfn, buddy_pfn;
+
+ assert(page >= seg->pages);
+ assert(page < seg->pages_end);
+ assert(page->level == PHYS_LEVEL_ALLOCATED);
+ assert(level < PHYS_NR_FREE_LISTS);
+
+ size = (1 << level);
+ pfn = page->pfn;
+
+ while (level < (PHYS_NR_FREE_LISTS - 1)) {
+ buddy_pfn = pfn ^ (1 << level);
+
+ if ((buddy_pfn < seg->start) || (buddy_pfn >= seg->end))
+ break;
+
+ buddy = &seg->pages[buddy_pfn - seg->start];
+
+ if (buddy->level != level)
+ break;
+
+ phys_free_list_remove(&seg->free_lists[level], buddy);
+ buddy->level = PHYS_LEVEL_ALLOCATED;
+ level++;
+ pfn &= -(1 << level);
+ page = &seg->pages[pfn - seg->start];
+ }
+
+ phys_free_list_insert(&seg->free_lists[level], page);
+ page->level = level;
+ seg->nr_free_pages += size;
+}
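
To make the buddy arithmetic in phys_seg_free_to_buddy() concrete, here is a small worked example (editorial sketch, not part of the commit). At level L, a block's buddy is found by flipping bit L of its frame number, and the merged block starts at the frame number with the low L+1 bits cleared.

#include <assert.h>

int main(void)
{
    unsigned long pfn, buddy_pfn, merged_pfn;
    unsigned int level;

    /* A free level-1 block (2 pages) starting at frame 6. */
    pfn = 6;
    level = 1;

    /* Its buddy is the other half of the enclosing level-2 block. */
    buddy_pfn = pfn ^ (1UL << level);          /* 6 ^ 2 == 4 */
    assert(buddy_pfn == 4);

    /* If the buddy is free too, both coalesce into a level-2 block
     * (4 pages) aligned on 1 << (level + 1) frames. */
    merged_pfn = pfn & -(1UL << (level + 1));  /* 6 & ~3 == 4 */
    assert(merged_pfn == 4);

    return 0;
}
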
+
+static void phys_cpu_pool_init(struct phys_cpu_pool *cpu_pool,
+ phys_pfn_t size)
+{
+ cpu_pool->size = size;
+ cpu_pool->transfer_size = (size + PHYS_CPU_POOL_TRANSFER_RATIO - 1)
+ / PHYS_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->nr_pages = 0;
+ list_init(&cpu_pool->pages);
+}
+
+/*
+ * Return a CPU pool.
+ *
+ * This function will generally return the pool matching the CPU running the
+ * calling thread. Because of context switches and thread migration, the
+ * caller might be running on another processor after this function returns.
+ * Although not optimal, this should rarely happen, and it doesn't affect the
+ * allocator operations in any other way, as CPU pools are always valid, and
+ * their access is serialized by a lock.
+ */
+static inline struct phys_cpu_pool * phys_cpu_pool_get(struct phys_seg *seg)
+{
+ return &seg->cpu_pools[cpu_id()];
+}
+
+static inline struct phys_page *
+phys_cpu_pool_pop(struct phys_cpu_pool *cpu_pool)
+{
+ struct phys_page *page;
+
+ assert(cpu_pool->nr_pages != 0);
+ cpu_pool->nr_pages--;
+ page = list_first_entry(&cpu_pool->pages, struct phys_page, node);
+ list_remove(&page->node);
+ return page;
+}
+
+static inline void phys_cpu_pool_push(struct phys_cpu_pool *cpu_pool,
+ struct phys_page *page)
+{
+ assert(cpu_pool->nr_pages < cpu_pool->size);
+ cpu_pool->nr_pages++;
+ list_insert(&cpu_pool->pages, &page->node);
+}
+
+static int phys_cpu_pool_fill(struct phys_cpu_pool *cpu_pool,
+ struct phys_seg *seg)
+{
+ struct phys_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == 0);
+
+ pthread_mutex_lock(&seg->lock);
+
+ for (i = 0; i < cpu_pool->transfer_size; i++) {
+ page = phys_seg_alloc_from_buddy(seg, 0);
+
+ if (page == NULL)
+ break;
+
+ phys_cpu_pool_push(cpu_pool, page);
+ }
+
+ pthread_mutex_unlock(&seg->lock);
+
+ return i;
+}
+
+static void phys_cpu_pool_drain(struct phys_cpu_pool *cpu_pool,
+ struct phys_seg *seg)
+{
+ struct phys_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == cpu_pool->size);
+
+ pthread_mutex_lock(&seg->lock);
+
+ for (i = cpu_pool->transfer_size; i > 0; i--) {
+ page = phys_cpu_pool_pop(cpu_pool);
+ phys_seg_free_to_buddy(seg, page, 0);
+ }
+
+ pthread_mutex_unlock(&seg->lock);
+}
+
+static inline phys_pfn_t phys_seg_start(struct phys_seg *seg)
+{
+ return seg->start;
+}
+
+static inline phys_pfn_t phys_seg_end(struct phys_seg *seg)
+{
+ return seg->end;
+}
+
+static inline phys_pfn_t phys_seg_size(struct phys_seg *seg)
+{
+ return phys_seg_end(seg) - phys_seg_start(seg);
+}
+
+static phys_pfn_t phys_seg_compute_pool_size(phys_pfn_t seg_size)
+{
+ phys_pfn_t size;
+
+ size = seg_size / PHYS_CPU_POOL_RATIO;
+
+ if (size == 0)
+ size = 1;
+ else if (size > PHYS_CPU_POOL_MAX_SIZE)
+ size = PHYS_CPU_POOL_MAX_SIZE;
+
+ return size;
+}
+
+static void phys_seg_init(struct phys_seg *seg, struct phys_page *pages)
+{
+ phys_pfn_t seg_size, pool_size;
+ unsigned int i;
+
+ seg_size = phys_seg_size(seg);
+ pool_size = phys_seg_compute_pool_size(seg_size);
+
+ for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
+ phys_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+
+ seg->pages = pages;
+ seg->pages_end = pages + seg_size;
+ pthread_mutex_init(&seg->lock, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
+ phys_free_list_init(&seg->free_lists[i]);
+
+ seg->nr_free_pages = 0;
+
+ for (i = 0; i < seg_size; i++)
+ phys_page_init(&pages[i], seg, seg->start + i);
+}
+
+/*
+ * Return the level (i.e. the index in the free lists array) matching the
+ * given size.
+ */
+static inline unsigned int phys_get_level(phys_pfn_t size)
+{
+ assert(size != 0);
+
+ size--;
+
+ if (size == 0)
+ return 0;
+ else
+ return (sizeof(size) * CHAR_BIT) - __builtin_clzl(size);
+}
+
+static struct phys_page * phys_seg_alloc(struct phys_seg *seg, phys_pfn_t size)
+{
+ struct phys_cpu_pool *cpu_pool;
+ struct phys_page *page;
+ unsigned int level;
+ int filled;
+
+ level = phys_get_level(size);
+
+ if (level == 0) {
+ cpu_pool = phys_cpu_pool_get(seg);
+
+ pthread_mutex_lock(&cpu_pool->lock);
+
+ if (cpu_pool->nr_pages == 0) {
+ filled = phys_cpu_pool_fill(cpu_pool, seg);
+
+ if (!filled) {
+ pthread_mutex_unlock(&cpu_pool->lock);
+ return NULL;
+ }
+ }
+
+ page = phys_cpu_pool_pop(cpu_pool);
+ pthread_mutex_unlock(&cpu_pool->lock);
+ } else {
+ pthread_mutex_lock(&seg->lock);
+ page = phys_seg_alloc_from_buddy(seg, level);
+ pthread_mutex_unlock(&seg->lock);
+ }
+
+ return page;
+}
+
+static void phys_seg_free(struct phys_seg *seg, struct phys_page *page,
+ phys_pfn_t size)
+{
+ struct phys_cpu_pool *cpu_pool;
+ unsigned int level;
+
+ level = phys_get_level(size);
+
+ if (level == 0) {
+ cpu_pool = phys_cpu_pool_get(seg);
+
+ pthread_mutex_lock(&cpu_pool->lock);
+
+ if (cpu_pool->nr_pages == cpu_pool->size)
+ phys_cpu_pool_drain(cpu_pool, seg);
+
+ phys_cpu_pool_push(cpu_pool, page);
+ pthread_mutex_unlock(&cpu_pool->lock);
+ } else {
+ pthread_mutex_lock(&seg->lock);
+ phys_seg_free_to_buddy(seg, page, level);
+ pthread_mutex_unlock(&seg->lock);
+ }
+}
+
+/*
+ * Load memory during initialization.
+ *
+ * This function partially initializes a segment.
+ */
+static void phys_load_segment(const char *name, phys_pfn_t start,
+ phys_pfn_t end, unsigned int seg_list_prio)
+{
+ static int initialized = 0;
+ struct phys_seg *seg;
+ struct list *seg_list;
+ unsigned int i;
+
+ assert(name != NULL);
+ assert(start < end);
+ assert(seg_list_prio < ARRAY_SIZE(phys_seg_lists));
+
+ if (!initialized) {
+ for (i = 0; i < ARRAY_SIZE(phys_seg_lists); i++)
+ list_init(&phys_seg_lists[i]);
+
+ phys_segs_size = 0;
+ initialized = 1;
+ }
+
+ if (phys_segs_size >= ARRAY_SIZE(phys_segs))
+ error_die(ERR_NORES);
+
+ seg_list = &phys_seg_lists[seg_list_prio];
+ seg = &phys_segs[phys_segs_size];
+
+ list_insert_tail(seg_list, &seg->node);
+ seg->start = start;
+ seg->end = end;
+ strncpy(seg->name, name, PHYS_NAME_SIZE);
+ seg->name[sizeof(seg->name) - 1] = '\0';
+
+ phys_segs_size++;
+}
+
+/*
+ * Loading segments is normally done by architecture-specific code. In
+ * this implementation, an Intel machine with two segments of RAM is
+ * virtualized.
+ */
+static void phys_load_segments(void)
+{
+ phys_pfn_t start, end;
+ size_t size;
+ void *addr;
+
+ /*
+ * Load the ISADMA segment.
+ */
+ size = PHYS_ISADMA_LIMIT * PAGE_SIZE;
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (addr == MAP_FAILED)
+ error_die(ERR_NOMEM);
+
+ start = (unsigned long)addr / PAGE_SIZE;
+ end = start + (size / PAGE_SIZE);
+ phys_load_segment("isadma", start, end, PHYS_SEGLIST_ISADMA);
+
+ /*
+ * Load the normal segment.
+ */
+ size = (PHYS_NORMAL_LIMIT - PHYS_ISADMA_LIMIT) * PAGE_SIZE;
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (addr == MAP_FAILED)
+ error_die(ERR_NOMEM);
+
+ start = (unsigned long)addr / PAGE_SIZE;
+ end = start + (size / PAGE_SIZE);
+ phys_load_segment("normal", start, end, PHYS_SEGLIST_NORMAL);
+}
+
+void phys_init(void)
+{
+ struct phys_seg *seg, *map_seg;
+ struct phys_page *page, *map;
+ struct list *seg_list;
+ phys_pfn_t start, map_size;
+ unsigned int i;
+
+ _pagesize = sysconf(_SC_PAGESIZE);
+ assert(ISP2(_pagesize));
+
+ phys_load_segments();
+
+ /*
+ * Compute the memory map size.
+ */
+ map_size = 0;
+
+ for (i = 0; i < phys_segs_size; i++)
+ map_size += phys_seg_size(&phys_segs[i]);
+
+ map_size = P2ROUND(map_size * sizeof(struct phys_page), PAGE_SIZE)
+ / PAGE_SIZE;
+
+ /*
+ * Find a segment from which to allocate the memory map.
+ */
+ for (seg_list = &phys_seg_lists[ARRAY_SIZE(phys_seg_lists) - 1];
+ seg_list >= phys_seg_lists;
+ seg_list--)
+ list_for_each_entry(seg_list, map_seg, node)
+ if (map_size <= phys_seg_size(map_seg))
+ goto found;
+
+ error_die(ERR_NOMEM);
+
+found:
+ /*
+ * Allocate the memory map.
+ */
+ map = (struct phys_page *)(phys_seg_start(map_seg) * PAGE_SIZE);
+
+ /*
+     * Initialize the segments, associating them with the memory map. When
+     * the segments are initialized, all their pages are marked as allocated,
+ * with a block size of one (level 0). They are then released, which
+ * populates the free lists.
+ */
+ start = 0;
+
+ for (i = 0; i < phys_segs_size; i++) {
+ seg = &phys_segs[i];
+ phys_seg_init(seg, map + start);
+
+ /*
+ * Don't release the memory map pages.
+ *
+ * XXX The memory map pages normally don't need descriptors, as they
+ * are never released. This implementation however can be used in
+ * cases where some memory is reserved at the start of all segments.
+ * In order not to require descriptors for the memory map, the segment
+ * where the map resides should be split. As it is quite cumbersome,
+ * no effort is made here to avoid wasting descriptors and the pages
+ * containing them.
+ */
+ if (seg == map_seg)
+ page = seg->pages + map_size;
+ else
+ page = seg->pages;
+
+ while (page < seg->pages_end) {
+ phys_seg_free_to_buddy(seg, page, 0);
+ page++;
+ }
+
+ start += phys_seg_size(seg);
+ }
+}
+
+struct phys_page * phys_alloc_pages(phys_pfn_t size)
+{
+ struct list *seg_list;
+ struct phys_seg *seg;
+ struct phys_page *page;
+
+ for (seg_list = &phys_seg_lists[ARRAY_SIZE(phys_seg_lists) - 1];
+ seg_list >= phys_seg_lists;
+ seg_list--)
+ list_for_each_entry(seg_list, seg, node) {
+ page = phys_seg_alloc(seg, size);
+
+ if (page != NULL)
+ return page;
+ }
+
+ return NULL;
+}
+
+void phys_free_pages(struct phys_page *page, phys_pfn_t size)
+{
+ phys_seg_free(page->seg, page, size);
+}
+
+phys_pfn_t phys_alloc(phys_pfn_t size)
+{
+ struct phys_page *page;
+
+ page = phys_alloc_pages(size);
+
+ /*
+ * XXX Rely on the system to never provide a virtual memory area
+ * starting at 0.
+ */
+ if (page == NULL)
+ return 0;
+
+ return page->pfn;
+}
+
+void phys_free(phys_pfn_t pfn, phys_pfn_t size)
+{
+ struct phys_page *page;
+
+ page = phys_page_lookup(pfn);
+ assert(page != NULL);
+ phys_free_pages(page, size);
+}
+
+void phys_info(void)
+{
+ struct phys_seg *seg;
+ unsigned int i, j;
+ char name[16];
+
+ printf(" name");
+
+ for (i = 0; i < PHYS_NR_FREE_LISTS; i++) {
+ snprintf(name, sizeof(name), "#%u", (1 << i));
+ printf(" %5s", name);
+ }
+
+ printf("\n");
+
+ for (i = 0; i < phys_segs_size; i++) {
+ seg = &phys_segs[i];
+
+ printf("%8s", seg->name);
+
+ pthread_mutex_lock(&seg->lock);
+
+ for (j = 0; j < ARRAY_SIZE(seg->free_lists); j++)
+ printf(" %5lu", seg->free_lists[j].size);
+
+ pthread_mutex_unlock(&seg->lock);
+
+ printf("\n");
+ }
+}
diff --git a/phys.h b/phys.h
new file mode 100644
index 0000000..5b06521
--- /dev/null
+++ b/phys.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Page allocator.
+ */
+
+#ifndef _PHYS_H
+#define _PHYS_H
+
+#include "list.h"
+
+/*
+ * Page frame number.
+ */
+typedef unsigned long phys_pfn_t;
+
+/*
+ * Page descriptor.
+ */
+struct phys_page {
+ struct phys_seg *seg;
+ phys_pfn_t pfn;
+ unsigned int level;
+ struct list node;
+};
+
+void phys_init(void);
+
+struct phys_page * phys_alloc_pages(phys_pfn_t size);
+
+void phys_free_pages(struct phys_page *page, phys_pfn_t size);
+
+phys_pfn_t phys_alloc(phys_pfn_t size);
+
+void phys_free(phys_pfn_t pfn, phys_pfn_t size);
+
+void phys_info(void);
+
+#endif /* _PHYS_H */
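
A minimal usage sketch of this interface (hypothetical test code, relying only on the declarations above). Sizes are expressed in pages; requests are rounded up internally to the matching power-of-two level.

#include <assert.h>

#include "phys.h"

int main(void)
{
    struct phys_page *page;
    phys_pfn_t pfn;

    phys_init();

    /* Allocate and release a contiguous block of 4 pages. */
    page = phys_alloc_pages(4);
    assert(page != NULL);
    phys_free_pages(page, 4);

    /* Same operation through the frame-number based interface. */
    pfn = phys_alloc(1);
    assert(pfn != 0);
    phys_free(pfn, 1);

    phys_info();
    return 0;
}
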
diff --git a/rbtree.c b/rbtree.c
new file mode 100644
index 0000000..177050e
--- /dev/null
+++ b/rbtree.c
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "macros.h"
+#include "rbtree.h"
+#include "rbtree_i.h"
+
+/*
+ * Return the index of a node in the children array of its parent.
+ *
+ * The parent parameter must not be null, and must be the parent of the
+ * given node.
+ */
+static inline int rbtree_index(const struct rbtree_node *node,
+ const struct rbtree_node *parent)
+{
+ assert(parent != NULL);
+ assert((node == NULL) || (rbtree_parent(node) == parent));
+
+ if (parent->children[RBTREE_LEFT] == node)
+ return RBTREE_LEFT;
+
+ assert(parent->children[RBTREE_RIGHT] == node);
+
+ return RBTREE_RIGHT;
+}
+
+/*
+ * Return the color of a node.
+ */
+static inline int rbtree_color(const struct rbtree_node *node)
+{
+ return node->parent & RBTREE_COLOR_MASK;
+}
+
+/*
+ * Return true if the node is red.
+ */
+static inline int rbtree_is_red(const struct rbtree_node *node)
+{
+ return rbtree_color(node) == RBTREE_COLOR_RED;
+}
+
+/*
+ * Return true if the node is black.
+ */
+static inline int rbtree_is_black(const struct rbtree_node *node)
+{
+ return rbtree_color(node) == RBTREE_COLOR_BLACK;
+}
+
+/*
+ * Set the parent of a node, retaining its current color.
+ */
+static inline void rbtree_set_parent(struct rbtree_node *node,
+ struct rbtree_node *parent)
+{
+ assert(rbtree_check_alignment(node));
+ assert(rbtree_check_alignment(parent));
+
+ node->parent = (unsigned long)parent | (node->parent & RBTREE_COLOR_MASK);
+}
+
+/*
+ * Set the color of a node, retaining its current parent.
+ */
+static inline void rbtree_set_color(struct rbtree_node *node, int color)
+{
+ assert((color & ~RBTREE_COLOR_MASK) == 0);
+ node->parent = (node->parent & RBTREE_PARENT_MASK) | color;
+}
+
+/*
+ * Set the color of a node to red, retaining its current parent.
+ */
+static inline void rbtree_set_red(struct rbtree_node *node)
+{
+ rbtree_set_color(node, RBTREE_COLOR_RED);
+}
+
+/*
+ * Set the color of a node to black, retaining its current parent.
+ */
+static inline void rbtree_set_black(struct rbtree_node *node)
+{
+ rbtree_set_color(node, RBTREE_COLOR_BLACK);
+}
+
+/*
+ * Perform a tree rotation, rooted at the given node.
+ *
+ * The direction parameter defines the rotation direction and is either
+ * RBTREE_LEFT or RBTREE_RIGHT.
+ */
+static void rbtree_rotate(struct rbtree *tree, struct rbtree_node *node,
+ int direction)
+{
+ struct rbtree_node *parent, *rnode;
+ int left, right;
+
+ left = direction;
+ right = 1 - left;
+ parent = rbtree_parent(node);
+ rnode = node->children[right];
+
+ node->children[right] = rnode->children[left];
+
+ if (rnode->children[left] != NULL)
+ rbtree_set_parent(rnode->children[left], node);
+
+ rnode->children[left] = node;
+ rbtree_set_parent(rnode, parent);
+
+ if (unlikely(parent == NULL))
+ tree->root = rnode;
+ else
+ parent->children[rbtree_index(node, parent)] = rnode;
+
+ rbtree_set_parent(node, rnode);
+}
+
+void rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
+ int index, struct rbtree_node *node)
+{
+ struct rbtree_node *grand_parent, *uncle, *tmp;
+ int left, right;
+
+ assert(rbtree_check_alignment(parent));
+ assert(rbtree_check_alignment(node));
+
+ node->parent = (unsigned long)parent | RBTREE_COLOR_RED;
+ node->children[RBTREE_LEFT] = NULL;
+ node->children[RBTREE_RIGHT] = NULL;
+
+ if (unlikely(parent == NULL))
+ tree->root = node;
+ else
+ parent->children[index] = node;
+
+ for (;;) {
+ if (parent == NULL) {
+ rbtree_set_black(node);
+ break;
+ }
+
+ if (rbtree_is_black(parent))
+ break;
+
+ grand_parent = rbtree_parent(parent);
+ assert(grand_parent != NULL);
+
+ left = rbtree_index(parent, grand_parent);
+ right = 1 - left;
+
+ uncle = grand_parent->children[right];
+
+ /*
+ * Case 1: uncle is red. Flip colors and repeat at grand parent.
+ */
+ if ((uncle != NULL) && rbtree_is_red(uncle)) {
+ rbtree_set_black(uncle);
+ rbtree_set_black(parent);
+ rbtree_set_red(grand_parent);
+ node = grand_parent;
+ parent = rbtree_parent(node);
+ continue;
+ }
+
+ /*
+ * Case 2: node is the right child of its parent. Rotate left at parent
+ * to reduce to case 3.
+ */
+ if (parent->children[right] == node) {
+ rbtree_rotate(tree, parent, left);
+ tmp = node;
+ node = parent;
+ parent = tmp;
+ }
+
+ /*
+ * Case 3: node is the left child of its parent. Handle colors, rotate
+ * right at grand parent, and leave.
+ */
+ rbtree_set_black(parent);
+ rbtree_set_red(grand_parent);
+ rbtree_rotate(tree, grand_parent, right);
+ break;
+ }
+
+ assert(rbtree_is_black(tree->root));
+}
+
+void
+rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
+{
+ struct rbtree_node *child, *parent, *brother;
+ int color, left, right;
+
+ if (node->children[RBTREE_LEFT] == NULL)
+ child = node->children[RBTREE_RIGHT];
+ else if (node->children[RBTREE_RIGHT] == NULL)
+ child = node->children[RBTREE_LEFT];
+ else {
+ struct rbtree_node *successor;
+
+ /*
+ * Two-children case: replace the node with its successor.
+ */
+
+ successor = node->children[RBTREE_RIGHT];
+
+ while (successor->children[RBTREE_LEFT] != NULL)
+ successor = successor->children[RBTREE_LEFT];
+
+ color = rbtree_color(successor);
+ child = successor->children[RBTREE_RIGHT];
+ parent = rbtree_parent(node);
+
+ if (unlikely(parent == NULL))
+ tree->root = successor;
+ else
+ parent->children[rbtree_index(node, parent)] = successor;
+
+ parent = rbtree_parent(successor);
+
+ /*
+ * Set parent directly to keep the original color.
+ */
+ successor->parent = node->parent;
+ successor->children[RBTREE_LEFT] = node->children[RBTREE_LEFT];
+ rbtree_set_parent(successor->children[RBTREE_LEFT], successor);
+
+ if (node == parent)
+ parent = successor;
+ else {
+ successor->children[RBTREE_RIGHT] = node->children[RBTREE_RIGHT];
+ rbtree_set_parent(successor->children[RBTREE_RIGHT], successor);
+ parent->children[RBTREE_LEFT] = child;
+
+ if (child != NULL)
+ rbtree_set_parent(child, parent);
+ }
+
+ goto update_color;
+ }
+
+ /*
+ * Node has at most one child.
+ */
+
+ color = rbtree_color(node);
+ parent = rbtree_parent(node);
+
+ if (child != NULL)
+ rbtree_set_parent(child, parent);
+
+ if (unlikely(parent == NULL))
+ tree->root = child;
+ else
+ parent->children[rbtree_index(node, parent)] = child;
+
+ /*
+ * The node has been removed, update the colors. The child pointer can
+ * be null, in which case it is considered a black leaf.
+ */
+update_color:
+ if (color == RBTREE_COLOR_RED)
+ return;
+
+ for (;;) {
+ if ((child != NULL) && rbtree_is_red(child)) {
+ rbtree_set_black(child);
+ break;
+ }
+
+ if (parent == NULL)
+ break;
+
+ left = rbtree_index(child, parent);
+ right = 1 - left;
+
+ brother = parent->children[right];
+
+ /*
+ * Case 1: brother is red. Recolor and rotate left at parent so that
+ * brother becomes black.
+ */
+ if (rbtree_is_red(brother)) {
+ rbtree_set_black(brother);
+ rbtree_set_red(parent);
+ rbtree_rotate(tree, parent, left);
+ brother = parent->children[right];
+ }
+
+ /*
+ * Case 2: brother has no red child. Recolor and repeat at parent.
+ */
+ if (((brother->children[RBTREE_LEFT] == NULL)
+ || rbtree_is_black(brother->children[RBTREE_LEFT]))
+ && ((brother->children[RBTREE_RIGHT] == NULL)
+ || rbtree_is_black(brother->children[RBTREE_RIGHT]))) {
+ rbtree_set_red(brother);
+ child = parent;
+ parent = rbtree_parent(child);
+ continue;
+ }
+
+ /*
+ * Case 3: brother's right child is black. Recolor and rotate right
+ * at brother to reduce to case 4.
+ */
+ if ((brother->children[right] == NULL)
+ || rbtree_is_black(brother->children[right])) {
+ rbtree_set_black(brother->children[left]);
+ rbtree_set_red(brother);
+ rbtree_rotate(tree, brother, right);
+ brother = parent->children[right];
+ }
+
+ /*
+         * Case 4: brother's right child is red. Exchange parent and brother
+ * colors (we already know brother is black), set brother's right child
+ * black, rotate left at parent and leave.
+ */
+ rbtree_set_color(brother, rbtree_color(parent));
+ rbtree_set_black(parent);
+ rbtree_set_black(brother->children[right]);
+ rbtree_rotate(tree, parent, left);
+ break;
+ }
+
+ assert((tree->root == NULL) || rbtree_is_black(tree->root));
+}
+
+struct rbtree_node * rbtree_nearest(struct rbtree_node *parent, int index,
+ int direction)
+{
+ assert(rbtree_check_index(direction));
+
+ if (parent == NULL)
+ return NULL;
+
+ assert(rbtree_check_index(index));
+
+ if (index != direction)
+ return parent;
+
+ return rbtree_walk(parent, direction);
+}
+
+struct rbtree_node * rbtree_firstlast(const struct rbtree *tree, int direction)
+{
+ struct rbtree_node *prev, *cur;
+
+ assert(rbtree_check_index(direction));
+
+ prev = NULL;
+
+ for (cur = tree->root; cur != NULL; cur = cur->children[direction])
+ prev = cur;
+
+ return prev;
+}
+
+struct rbtree_node * rbtree_walk(struct rbtree_node *node, int direction)
+{
+ int left, right;
+
+ assert(rbtree_check_index(direction));
+
+ left = direction;
+ right = 1 - left;
+
+ if (node == NULL)
+ return NULL;
+
+ if (node->children[left] != NULL) {
+ node = node->children[left];
+
+ while (node->children[right] != NULL)
+ node = node->children[right];
+ } else {
+ struct rbtree_node *parent;
+ int index;
+
+ for (;;) {
+ parent = rbtree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = rbtree_index(node, parent);
+ node = parent;
+
+ if (index == right)
+ break;
+ }
+ }
+
+ return node;
+}
+
+/*
+ * Return the left-most deepest child node of the given node.
+ */
+static struct rbtree_node * rbtree_find_deepest(struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+
+ assert(node != NULL);
+
+ for (;;) {
+ parent = node;
+ node = node->children[RBTREE_LEFT];
+
+ if (node == NULL) {
+ node = parent->children[RBTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+ }
+ }
+}
+
+struct rbtree_node * rbtree_postwalk_deepest(const struct rbtree *tree)
+{
+ struct rbtree_node *node;
+
+ node = tree->root;
+
+ if (node == NULL)
+ return NULL;
+
+ return rbtree_find_deepest(node);
+}
+
+struct rbtree_node * rbtree_postwalk_unlink(struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+ int index;
+
+ if (node == NULL)
+ return NULL;
+
+ assert(node->children[RBTREE_LEFT] == NULL);
+ assert(node->children[RBTREE_RIGHT] == NULL);
+
+ parent = rbtree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = rbtree_index(node, parent);
+ parent->children[index] = NULL;
+ node = parent->children[RBTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+
+ return rbtree_find_deepest(node);
+}
diff --git a/rbtree.h b/rbtree.h
new file mode 100644
index 0000000..fb59515
--- /dev/null
+++ b/rbtree.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Red-black tree.
+ */
+
+#ifndef _RBTREE_H
+#define _RBTREE_H
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "macros.h"
+
+/*
+ * Indexes of the left and right nodes in the children array of a node.
+ */
+#define RBTREE_LEFT 0
+#define RBTREE_RIGHT 1
+
+/*
+ * Red-black node.
+ */
+struct rbtree_node;
+
+/*
+ * Red-black tree.
+ */
+struct rbtree;
+
+/*
+ * Static tree initializer.
+ */
+#define RBTREE_INITIALIZER { NULL }
+
+#include "rbtree_i.h"
+
+/*
+ * Initialize a tree.
+ */
+static inline void rbtree_init(struct rbtree *tree)
+{
+ tree->root = NULL;
+}
+
+/*
+ * Initialize a node.
+ *
+ * A node is in no tree when its parent points to itself.
+ */
+static inline void rbtree_node_init(struct rbtree_node *node)
+{
+ assert(rbtree_check_alignment(node));
+
+ node->parent = (unsigned long)node | RBTREE_COLOR_RED;
+ node->children[RBTREE_LEFT] = NULL;
+ node->children[RBTREE_RIGHT] = NULL;
+}
+
+/*
+ * Return true if node is in no tree.
+ */
+static inline int rbtree_node_unlinked(const struct rbtree_node *node)
+{
+ return rbtree_parent(node) == node;
+}
+
+/*
+ * Macro that evaluates to the address of the structure containing the
+ * given node based on the given type and member.
+ */
+#define rbtree_entry(node, type, member) structof(node, type, member)
+
+/*
+ * Return true if tree is empty.
+ */
+static inline int rbtree_empty(const struct rbtree *tree)
+{
+ return tree->root == NULL;
+}
+
+/*
+ * Look up a node in a tree.
+ *
+ * Note that implementing the lookup algorithm as a macro gives two benefits:
+ * First, it avoids the overhead of a callback function. Second, the type of the
+ * cmp_fn parameter isn't rigid. The only guarantee offered by this
+ * implementation is that the key parameter is the first parameter given to
+ * cmp_fn. This way, users can pass only the value they need for comparison
+ * instead of e.g. allocating a full structure on the stack.
+ *
+ * See rbtree_insert().
+ */
+#define rbtree_lookup(tree, key, cmp_fn) \
+MACRO_BEGIN \
+ struct rbtree_node *cur; \
+ int diff; \
+ \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(key, cur); \
+ \
+ if (diff == 0) \
+ break; \
+ \
+ cur = cur->children[rbtree_d2i(diff)]; \
+ } \
+ \
+ cur; \
+MACRO_END
+
+/*
+ * Look up a node or one of its nearest nodes in a tree.
+ *
+ * This macro essentially acts as rbtree_lookup() but if no entry matched
+ * the key, an additional step is performed to obtain the next or previous
+ * node, depending on the direction (left or right).
+ *
+ * The constraints that apply to the key parameter are the same as for
+ * rbtree_lookup().
+ */
+#define rbtree_lookup_nearest(tree, key, cmp_fn, dir) \
+MACRO_BEGIN \
+ struct rbtree_node *cur, *prev; \
+ int diff, index; \
+ \
+ prev = NULL; \
+ index = -1; \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(key, cur); \
+ \
+ if (diff == 0) \
+ break; \
+ \
+ prev = cur; \
+ index = rbtree_d2i(diff); \
+ cur = cur->children[index]; \
+ } \
+ \
+ if (cur == NULL) \
+ cur = rbtree_nearest(prev, index, dir); \
+ \
+ cur; \
+MACRO_END
+
+/*
+ * Insert a node in a tree.
+ *
+ * This macro performs a standard lookup to obtain the insertion point of
+ * the given node in the tree (it is assumed that the inserted node never
+ * compares equal to any other entry in the tree) and links the node. It
+ * then checks for red-black rule violations, and rebalances the tree
+ * if necessary.
+ *
+ * Unlike rbtree_lookup(), the cmp_fn parameter must compare two complete
+ * entries, so it is suggested to use two different comparison inline
+ * functions, such as myobj_cmp_lookup() and myobj_cmp_insert(). There is no
+ * guarantee about the order of the nodes given to the comparison function.
+ *
+ * See rbtree_lookup().
+ */
+#define rbtree_insert(tree, node, cmp_fn) \
+MACRO_BEGIN \
+ struct rbtree_node *cur, *prev; \
+ int diff, index; \
+ \
+ prev = NULL; \
+ index = -1; \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(node, cur); \
+ assert(diff != 0); \
+ prev = cur; \
+ index = rbtree_d2i(diff); \
+ cur = cur->children[index]; \
+ } \
+ \
+ rbtree_insert_rebalance(tree, prev, index, node); \
+MACRO_END
+
+/*
+ * Look up a node/slot pair in a tree.
+ *
+ * This macro essentially acts as rbtree_lookup() but in addition to a node,
+ * it also returns a slot, which identifies an insertion point in the tree.
+ * If the returned node is null, the slot can be used by rbtree_insert_slot()
+ * to insert without the overhead of an additional lookup. The slot is a
+ * simple unsigned long integer.
+ *
+ * The constraints that apply to the key parameter are the same as for
+ * rbtree_lookup().
+ */
+#define rbtree_lookup_slot(tree, key, cmp_fn, slot) \
+MACRO_BEGIN \
+ struct rbtree_node *cur, *prev; \
+ int diff, index; \
+ \
+ prev = NULL; \
+ index = 0; \
+ cur = (tree)->root; \
+ \
+ while (cur != NULL) { \
+ diff = cmp_fn(key, cur); \
+ \
+ if (diff == 0) \
+ break; \
+ \
+ prev = cur; \
+ index = rbtree_d2i(diff); \
+ cur = cur->children[index]; \
+ } \
+ \
+ (slot) = rbtree_slot(prev, index); \
+ cur; \
+MACRO_END
+
+/*
+ * Insert a node at an insertion point in a tree.
+ *
+ * This macro essentially acts as rbtree_insert() except that it doesn't
+ * obtain the insertion point with a standard lookup. The insertion point
+ * is obtained by calling rbtree_lookup_slot(). In addition, the new node
+ * must not compare equal to an existing node in the tree (i.e. the slot
+ * must denote a null node).
+ */
+#define rbtree_insert_slot(tree, slot, node) \
+MACRO_BEGIN \
+ struct rbtree_node *parent; \
+ int index; \
+ \
+ parent = rbtree_slot_parent(slot); \
+ index = rbtree_slot_index(slot); \
+ rbtree_insert_rebalance(tree, parent, index, node); \
+MACRO_END
+
+/*
+ * Remove a node from a tree.
+ *
+ * After completion, the node is stale.
+ */
+void rbtree_remove(struct rbtree *tree, struct rbtree_node *node);
+
+/*
+ * Return the first node of a tree.
+ */
+#define rbtree_first(tree) rbtree_firstlast(tree, RBTREE_LEFT)
+
+/*
+ * Return the last node of a tree.
+ */
+#define rbtree_last(tree) rbtree_firstlast(tree, RBTREE_RIGHT)
+
+/*
+ * Return the node previous to the given node.
+ */
+#define rbtree_prev(node) rbtree_walk(node, RBTREE_LEFT)
+
+/*
+ * Return the node next to the given node.
+ */
+#define rbtree_next(node) rbtree_walk(node, RBTREE_RIGHT)
+
+/*
+ * Forge a loop to process all nodes of a tree, removing them when visited.
+ *
+ * This macro can only be used to destroy a tree, so that the resources used
+ * by the entries can be released by the user. It basically removes all nodes
+ * without doing any color checking.
+ *
+ * After completion, all nodes and the tree root member are stale.
+ */
+#define rbtree_for_each_remove(tree, node, tmp) \
+for (node = rbtree_postwalk_deepest(tree), \
+ tmp = rbtree_postwalk_unlink(node); \
+ node != NULL; \
+ node = tmp, tmp = rbtree_postwalk_unlink(node)) \
+
+#endif /* _RBTREE_H */
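
To show how the lookup and insertion macros above fit together, here is a hypothetical usage sketch (editorial example, not part of the commit). The struct myobj type and the myobj_cmp_lookup()/myobj_cmp_insert() comparison functions are invented, following the naming suggested in the rbtree_insert() comment; the lookup comparison takes the raw key as its first argument, while the insertion comparison takes two complete entries.

#include <assert.h>
#include <stddef.h>

#include "rbtree.h"

struct myobj {
    struct rbtree_node node;
    int key;
};

/* Lookup comparison: the first parameter is the key given to rbtree_lookup(). */
static inline int myobj_cmp_lookup(int key, struct rbtree_node *node)
{
    return key - rbtree_entry(node, struct myobj, node)->key;
}

/* Insertion comparison: both parameters are complete entries. */
static inline int myobj_cmp_insert(struct rbtree_node *a, struct rbtree_node *b)
{
    return rbtree_entry(a, struct myobj, node)->key
           - rbtree_entry(b, struct myobj, node)->key;
}

int main(void)
{
    struct rbtree tree;
    struct rbtree_node *node, *tmp;
    struct myobj objs[] = { { .key = 3 }, { .key = 1 }, { .key = 2 } };
    size_t i;

    rbtree_init(&tree);

    for (i = 0; i < ARRAY_SIZE(objs); i++)
        rbtree_insert(&tree, &objs[i].node, myobj_cmp_insert);

    node = rbtree_lookup(&tree, 2, myobj_cmp_lookup);
    assert(node != NULL);
    assert(rbtree_entry(node, struct myobj, node)->key == 2);

    /* Tear the tree down; the entries are automatic here, nothing to free. */
    rbtree_for_each_remove(&tree, node, tmp)
        ;

    return 0;
}
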
diff --git a/rbtree_i.h b/rbtree_i.h
new file mode 100644
index 0000000..c54f3c1
--- /dev/null
+++ b/rbtree_i.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RBTREE_I_H
+#define _RBTREE_I_H
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "macros.h"
+
+/*
+ * Red-black node structure.
+ *
+ * To reduce the number of branches and the instruction cache footprint,
+ * the left and right child pointers are stored in an array, and the symmetry
+ * of most tree operations is exploited by using left/right variables when
+ * referring to children.
+ *
+ * In addition, this implementation assumes that all nodes are 4-byte aligned,
+ * so that the least significant bit of the parent member can be used to store
+ * the color of the node. This is true for all modern 32-bit and 64-bit
+ * architectures, as long as the nodes aren't embedded in structures with
+ * special alignment constraints such as member packing.
+ */
+struct rbtree_node {
+ unsigned long parent;
+ struct rbtree_node *children[2];
+};
+
+/*
+ * Red-black tree structure.
+ */
+struct rbtree {
+ struct rbtree_node *root;
+};
+
+/*
+ * Masks applied on the parent member of a node to obtain either the
+ * color or the parent address.
+ */
+#define RBTREE_COLOR_MASK 0x1UL
+#define RBTREE_PARENT_MASK (~0x3UL)
+
+/*
+ * Node colors.
+ */
+#define RBTREE_COLOR_RED 0
+#define RBTREE_COLOR_BLACK 1
+
+/*
+ * Masks applied on slots to obtain either the child index or the parent
+ * address.
+ */
+#define RBTREE_SLOT_INDEX_MASK 0x1UL
+#define RBTREE_SLOT_PARENT_MASK (~RBTREE_SLOT_INDEX_MASK)
+
+/*
+ * Return true if the given pointer is suitably aligned.
+ */
+static inline int rbtree_check_alignment(const struct rbtree_node *node)
+{
+ return ((unsigned long)node & (~RBTREE_PARENT_MASK)) == 0;
+}
+
+/*
+ * Return true if the given index is a valid child index.
+ */
+static inline int rbtree_check_index(int index)
+{
+ return index == (index & 1);
+}
+
+/*
+ * Convert the result of a comparison into an index in the children array
+ * (0 or 1).
+ *
+ * This function is mostly used when looking up a node.
+ */
+static inline int rbtree_d2i(int diff)
+{
+ return !(diff <= 0);
+}
+
+/*
+ * Return the parent of a node.
+ */
+static inline struct rbtree_node * rbtree_parent(const struct rbtree_node *node)
+{
+ return (struct rbtree_node *)(node->parent & RBTREE_PARENT_MASK);
+}
+
+/*
+ * Translate an insertion point into a slot.
+ */
+static inline unsigned long rbtree_slot(struct rbtree_node *parent, int index)
+{
+ assert(rbtree_check_alignment(parent));
+ assert(rbtree_check_index(index));
+ return (unsigned long)parent | index;
+}
+
+/*
+ * Extract the parent address from a slot.
+ */
+static inline struct rbtree_node * rbtree_slot_parent(unsigned long slot)
+{
+ return (struct rbtree_node *)(slot & RBTREE_SLOT_PARENT_MASK);
+}
+
+/*
+ * Extract the index from a slot.
+ */
+static inline int rbtree_slot_index(unsigned long slot)
+{
+ return slot & RBTREE_SLOT_INDEX_MASK;
+}
+
+/*
+ * Insert a node in a tree, rebalancing it if necessary.
+ *
+ * The index parameter is the index in the children array of the parent where
+ * the new node is to be inserted. It is ignored if the parent is null.
+ *
+ * This function is intended to be used by the rbtree_insert() macro only.
+ */
+void rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
+ int index, struct rbtree_node *node);
+
+/*
+ * Return the previous or next node relative to a location in a tree.
+ *
+ * The parent and index parameters define the location, which can be empty.
+ * The direction parameter is either RBTREE_LEFT (to obtain the previous
+ * node) or RBTREE_RIGHT (to obtain the next one).
+ */
+struct rbtree_node * rbtree_nearest(struct rbtree_node *parent, int index,
+ int direction);
+
+/*
+ * Return the first or last node of a tree.
+ *
+ * The direction parameter is either RBTREE_LEFT (to obtain the first node)
+ * or RBTREE_RIGHT (to obtain the last one).
+ */
+struct rbtree_node * rbtree_firstlast(const struct rbtree *tree, int direction);
+
+/*
+ * Return the node next to, or previous to the given node.
+ *
+ * The direction parameter is either RBTREE_LEFT (to obtain the previous node)
+ * or RBTREE_RIGHT (to obtain the next one).
+ */
+struct rbtree_node * rbtree_walk(struct rbtree_node *node, int direction);
+
+/*
+ * Return the left-most deepest node of a tree, which is the starting point of
+ * the postorder traversal performed by rbtree_for_each_remove().
+ */
+struct rbtree_node * rbtree_postwalk_deepest(const struct rbtree *tree);
+
+/*
+ * Unlink a node from its tree and return the next (right) node in postorder.
+ */
+struct rbtree_node * rbtree_postwalk_unlink(struct rbtree_node *node);
+
+#endif /* _RBTREE_I_H */
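
A small sketch of the slot encoding documented above (editorial example, not part of the commit): a slot packs a suitably aligned parent pointer and a child index into a single unsigned long, which is what rbtree_lookup_slot() hands back to rbtree_insert_slot().

#include <assert.h>

#include "rbtree.h"

int main(void)
{
    struct rbtree_node node;
    unsigned long slot;

    rbtree_node_init(&node);

    /* Pack an insertion point, then extract its components again. */
    slot = rbtree_slot(&node, RBTREE_RIGHT);
    assert(rbtree_slot_parent(slot) == &node);
    assert(rbtree_slot_index(slot) == RBTREE_RIGHT);

    return 0;
}
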
diff --git a/rdatree.c b/rdatree.c
new file mode 100644
index 0000000..a233957
--- /dev/null
+++ b/rdatree.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "error.h"
+#include "macros.h"
+#include "rdatree.h"
+
+/*
+ * Global properties used to shape radix trees.
+ */
+#define RDATREE_RADIX 6
+#define RDATREE_RADIX_SIZE (1UL << RDATREE_RADIX)
+#define RDATREE_RADIX_MASK (RDATREE_RADIX_SIZE - 1)
+
+#if RDATREE_RADIX < 6
+typedef unsigned long rdatree_bm_t;
+#define rdatree_ffs(x) __builtin_ffsl(x)
+#elif RDATREE_RADIX == 6 /* RDATREE_RADIX < 6 */
+#ifdef __LP64__
+typedef unsigned long rdatree_bm_t;
+#define rdatree_ffs(x) __builtin_ffsl(x)
+#else /* __LP64__ */
+typedef unsigned long long rdatree_bm_t;
+#define rdatree_ffs(x) __builtin_ffsll(x)
+#endif /* __LP64__ */
+#else /* RDATREE_RADIX < 6 */
+#error "radix too high"
+#endif /* RDATREE_RADIX < 6 */
+
+/*
+ * Allocation bitmap size in bits.
+ */
+#define RDATREE_BM_SIZE (sizeof(rdatree_bm_t) * CHAR_BIT)
+
+/*
+ * Empty/full allocation bitmap words.
+ */
+#define RDATREE_BM_EMPTY ((rdatree_bm_t)0)
+#define RDATREE_BM_FULL \
+ ((~(rdatree_bm_t)0) >> (RDATREE_BM_SIZE - RDATREE_RADIX_SIZE))
+
+/*
+ * Radix tree node.
+ *
+ * The index member can only be used if the parent isn't null. Concerning the
+ * allocation bitmap, a bit is set when the node it denotes, or one of its
+ * children, can be used to allocate a slot. Similarly, a bit is clear when
+ * the matching node and all of its children have no free slot.
+ */
+struct rdatree_node {
+ struct rdatree_node *parent;
+ unsigned int index;
+ unsigned int nr_slots;
+ rdatree_bm_t alloc_bm;
+ void *slots[RDATREE_RADIX_SIZE];
+};
+
+static struct rdatree_node * rdatree_node_create(void)
+{
+ struct rdatree_node *node;
+
+ node = malloc(sizeof(*node));
+
+ if (node == NULL)
+ return NULL;
+
+ node->parent = NULL;
+ node->nr_slots = 0;
+ node->alloc_bm = RDATREE_BM_FULL;
+ memset(node->slots, 0, sizeof(node->slots));
+ return node;
+}
+
+static void rdatree_node_destroy(struct rdatree_node *node)
+{
+ free(node);
+}
+
+static inline void rdatree_node_link(struct rdatree_node *node,
+ struct rdatree_node *parent,
+ unsigned int index)
+{
+ node->parent = parent;
+ node->index = index;
+}
+
+static inline void rdatree_node_unlink(struct rdatree_node *node)
+{
+ assert(node->parent != NULL);
+ node->parent = NULL;
+}
+
+static inline int rdatree_node_full(struct rdatree_node *node)
+{
+ return (node->nr_slots == ARRAY_SIZE(node->slots));
+}
+
+static inline int rdatree_node_empty(struct rdatree_node *node)
+{
+ return (node->nr_slots == 0);
+}
+
+static inline void rdatree_node_insert(struct rdatree_node *node,
+ unsigned int index, void *ptr)
+{
+ assert(node->slots[index] == NULL);
+
+ node->nr_slots++;
+ node->slots[index] = ptr;
+}
+
+static inline void rdatree_node_remove(struct rdatree_node *node,
+ unsigned int index)
+{
+ assert(node->slots[index] != NULL);
+
+ node->nr_slots--;
+ node->slots[index] = NULL;
+}
+
+static inline void ** rdatree_node_find(struct rdatree_node *node,
+ unsigned int index)
+{
+ while (index < ARRAY_SIZE(node->slots)) {
+ if (node->slots[index] != NULL)
+ return &node->slots[index];
+
+ index++;
+ }
+
+ return NULL;
+}
+
+static inline void rdatree_node_bm_set(struct rdatree_node *node,
+ unsigned int index)
+{
+ node->alloc_bm |= (rdatree_bm_t)1 << index;
+}
+
+static inline void rdatree_node_bm_clear(struct rdatree_node *node,
+ unsigned int index)
+{
+ node->alloc_bm &= ~((rdatree_bm_t)1 << index);
+}
+
+static inline int rdatree_node_bm_is_set(struct rdatree_node *node,
+ unsigned int index)
+{
+ return (node->alloc_bm & ((rdatree_bm_t)1 << index));
+}
+
+static inline int rdatree_node_bm_empty(struct rdatree_node *node)
+{
+ return (node->alloc_bm == RDATREE_BM_EMPTY);
+}
+
+static inline int rdatree_node_bm_first(struct rdatree_node *node)
+{
+ return rdatree_ffs(node->alloc_bm) - 1;
+}
+
+static inline unsigned long rdatree_max_key(int height)
+{
+ unsigned int shift;
+
+ shift = RDATREE_RADIX * height;
+
+ if (shift >= (sizeof(unsigned long) * CHAR_BIT))
+ return ~0UL;
+ else
+        return (1UL << shift) - 1;
+}
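
For reference, a short worked example of how keys are decomposed with the RDATREE_RADIX value of 6 defined above (editorial sketch, not part of the commit): each tree level consumes 6 bits of the key, most significant first, so a tree of height 1 covers keys 0-63 and a tree of height 2 covers keys 0-4095.

#include <assert.h>

int main(void)
{
    unsigned long key = 0x7d;   /* 125, too large for a height-1 tree */

    /* Maximum keys for heights 1 and 2, as computed by rdatree_max_key(). */
    assert(((1UL << 6) - 1) == 63);
    assert(((1UL << 12) - 1) == 4095);

    /* In a height-2 tree, the root slot index comes from the high 6 bits
     * and the leaf slot index from the low 6 bits, as in rdatree_insert(). */
    assert(((key >> 6) & 0x3f) == 1);
    assert((key & 0x3f) == 0x3d);

    return 0;
}
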
+
+static void rdatree_shrink(struct rdatree *tree)
+{
+ struct rdatree_node *node, *child;
+
+ while (tree->height > 0) {
+ node = tree->root;
+
+ if (node->nr_slots != 1)
+ break;
+
+ child = node->slots[0];
+
+ if (child == NULL)
+ break;
+
+ rdatree_node_destroy(node);
+ tree->height--;
+
+ if (tree->height > 0)
+ rdatree_node_unlink(child);
+
+ tree->root = child;
+ }
+}
+
+static int rdatree_grow(struct rdatree *tree, unsigned long key)
+{
+ struct rdatree_node *node;
+ int new_height;
+
+ new_height = tree->height + 1;
+
+ while (key > rdatree_max_key(new_height))
+ new_height++;
+
+ if (tree->root == NULL) {
+ tree->height = new_height;
+ return ERR_SUCCESS;
+ }
+
+ do {
+ node = rdatree_node_create();
+
+ if (node == NULL) {
+ rdatree_shrink(tree);
+ return ERR_NOMEM;
+ }
+
+ rdatree_node_insert(node, 0, tree->root);
+
+ if (tree->height == 0)
+ rdatree_node_bm_clear(node, 0);
+ else {
+ rdatree_node_link(tree->root, node, 0);
+
+ if (rdatree_node_bm_empty(tree->root))
+ rdatree_node_bm_clear(node, 0);
+ }
+
+ tree->root = node;
+ tree->height++;
+ } while (new_height > tree->height);
+
+ return ERR_SUCCESS;
+}
+
+static void rdatree_cleanup(struct rdatree *tree, struct rdatree_node *node)
+{
+ struct rdatree_node *prev;
+
+ for (;;) {
+ if (!rdatree_node_empty(node)) {
+ if (node->parent == NULL)
+ rdatree_shrink(tree);
+
+ break;
+ }
+
+ if (node->parent == NULL) {
+ rdatree_node_destroy(node);
+ tree->height = 0;
+ tree->root = NULL;
+ break;
+ }
+
+ prev = node;
+ node = node->parent;
+ rdatree_node_remove(node, prev->index);
+ rdatree_node_destroy(prev);
+ }
+}
+
+static void rdatree_insert_bm_clear(struct rdatree_node *node,
+ unsigned int index)
+{
+ for (;;) {
+ rdatree_node_bm_clear(node, index);
+
+ if ((node->parent == NULL) || !rdatree_node_full(node))
+ break;
+
+ index = node->index;
+ node = node->parent;
+ }
+}
+
+int rdatree_insert(struct rdatree *tree, unsigned long key, void *ptr)
+{
+ struct rdatree_node *node, *prev;
+ unsigned int index = index; /* self-initialized to silence a spurious uninitialized warning */
+ int error, height, shift;
+
+ assert(ptr != NULL);
+
+ if (key > rdatree_max_key(tree->height)) {
+ error = rdatree_grow(tree, key);
+
+ if (error)
+ return error;
+ }
+
+ height = tree->height;
+
+ if (height == 0) {
+ if (tree->root != NULL)
+ return ERR_BUSY;
+
+ tree->root = ptr;
+ return ERR_SUCCESS;
+ }
+
+ node = tree->root;
+ shift = (height - 1) * RDATREE_RADIX;
+ prev = NULL;
+
+ do {
+ if (node == NULL) {
+ node = rdatree_node_create();
+
+ if (node == NULL) {
+ if (prev == NULL)
+ tree->height = 0;
+ else
+ rdatree_cleanup(tree, prev);
+
+ return ERR_NOMEM;
+ }
+
+ if (prev == NULL)
+ tree->root = node;
+ else {
+ rdatree_node_insert(prev, index, node);
+ rdatree_node_link(node, prev, index);
+ }
+ }
+
+ prev = node;
+ index = (key >> shift) & RDATREE_RADIX_MASK;
+ node = prev->slots[index];
+ shift -= RDATREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node != NULL)
+ return ERR_BUSY;
+
+ rdatree_node_insert(prev, index, ptr);
+ rdatree_insert_bm_clear(prev, index);
+ return ERR_SUCCESS;
+}
+
+int rdatree_insert_alloc(struct rdatree *tree, void *ptr, unsigned long *keyp)
+{
+ struct rdatree_node *node, *prev;
+ unsigned long key;
+ int error, height, shift, index = index; /* index self-initialized to silence a spurious warning */
+
+ assert(ptr != NULL);
+
+ height = tree->height;
+
+ if (height == 0) {
+ if (tree->root == NULL) {
+ tree->root = ptr;
+ *keyp = 0;
+ return ERR_SUCCESS;
+ }
+
+ goto grow;
+ }
+
+ node = tree->root;
+ key = 0;
+ shift = (height - 1) * RDATREE_RADIX;
+ prev = NULL;
+
+ do {
+ if (node == NULL) {
+ node = rdatree_node_create();
+
+ if (node == NULL) {
+ rdatree_cleanup(tree, prev);
+ return ERR_NOMEM;
+ }
+
+ rdatree_node_insert(prev, index, node);
+ rdatree_node_link(node, prev, index);
+ }
+
+ prev = node;
+ index = rdatree_node_bm_first(node);
+
+ if (index == -1)
+ goto grow;
+
+ key |= ((unsigned long)index << shift);
+ node = node->slots[index];
+ shift -= RDATREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ rdatree_node_insert(prev, index, ptr);
+ rdatree_insert_bm_clear(prev, index);
+ goto out;
+
+grow:
+ key = rdatree_max_key(height) + 1;
+ error = rdatree_insert(tree, key, ptr);
+
+ if (error)
+ return error;
+
+out:
+ *keyp = key;
+ return ERR_SUCCESS;
+}
+
+static void rdatree_remove_bm_set(struct rdatree_node *node,
+ unsigned int index)
+{
+ do {
+ rdatree_node_bm_set(node, index);
+
+ if (node->parent == NULL)
+ break;
+
+ index = node->index;
+ node = node->parent;
+ } while (!rdatree_node_bm_is_set(node, index));
+}
+
+void * rdatree_remove(struct rdatree *tree, unsigned long key)
+{
+ struct rdatree_node *node, *prev;
+ unsigned int index;
+ int height, shift;
+
+ height = tree->height;
+
+ if (key > rdatree_max_key(height))
+ return NULL;
+
+ node = tree->root;
+
+ if (height == 0) {
+ tree->root = NULL;
+ return node;
+ }
+
+ shift = (height - 1) * RDATREE_RADIX;
+
+ do {
+ if (node == NULL)
+ return NULL;
+
+ prev = node;
+ index = (key >> shift) & RDATREE_RADIX_MASK;
+ node = node->slots[index];
+ shift -= RDATREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node == NULL)
+ return NULL;
+
+ rdatree_node_remove(prev, index);
+ rdatree_remove_bm_set(prev, index);
+ rdatree_cleanup(tree, prev);
+ return node;
+}
+
+static void ** rdatree_lookup_prim(struct rdatree *tree, unsigned long key)
+{
+ struct rdatree_node *node, *prev;
+ unsigned int index;
+ int height, shift;
+
+ height = tree->height;
+
+ if (key > rdatree_max_key(height))
+ return NULL;
+
+ if (height == 0) {
+ if (tree->root == NULL)
+ return NULL;
+
+ return &tree->root;
+ }
+
+ node = tree->root;
+ shift = (height - 1) * RDATREE_RADIX;
+
+ do {
+ if (node == NULL)
+ return NULL;
+
+ prev = node;
+ index = (key >> shift) & RDATREE_RADIX_MASK;
+ node = node->slots[index];
+ shift -= RDATREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node == NULL)
+ return NULL;
+
+ return &prev->slots[index];
+}
+
+void * rdatree_lookup(struct rdatree *tree, unsigned long key)
+{
+ void **slot;
+
+ slot = rdatree_lookup_prim(tree, key);
+ return (slot == NULL) ? NULL : *slot;
+}
+
+void ** rdatree_lookup_slot(struct rdatree *tree, unsigned long key)
+{
+ return rdatree_lookup_prim(tree, key);
+}
+
+void * rdatree_replace_slot(void **slot, void *ptr)
+{
+ void *old;
+
+ assert(ptr != NULL);
+
+ old = *slot;
+ assert(old != NULL);
+ *slot = ptr;
+ return old;
+}
+
+static struct rdatree_node * rdatree_walk(struct rdatree *tree,
+ struct rdatree_node *node)
+{
+ struct rdatree_node *prev;
+ void **slot;
+ unsigned int index;
+ int height;
+
+ if (node == NULL) {
+ height = tree->height;
+ node = tree->root;
+
+ while (height > 1) {
+ slot = rdatree_node_find(node, 0);
+ node = *slot;
+ height--;
+ }
+
+ return node;
+ }
+
+ height = 0;
+
+ for (;;) {
+ prev = node->parent;
+
+ if (prev == NULL)
+ return NULL;
+
+ index = node->index;
+ slot = rdatree_node_find(prev, index + 1);
+
+ if (slot != NULL)
+ break;
+
+ height++;
+ node = prev;
+ }
+
+ node = *slot;
+
+ while (height > 0) {
+ slot = rdatree_node_find(node, 0);
+ node = *slot;
+ height--;
+ }
+
+ return node;
+}
+
+void * rdatree_iter_next(struct rdatree *tree, struct rdatree_iter *iter)
+{
+ unsigned int index;
+
+ if (tree->height == 0) {
+ if (iter->slot != NULL)
+ return NULL;
+
+ iter->slot = &tree->root;
+ return *iter->slot;
+ }
+
+ if (iter->node != NULL) {
+ index = iter->slot - ((struct rdatree_node *)iter->node)->slots;
+ iter->slot = rdatree_node_find(iter->node, index + 1);
+ }
+
+ if (iter->slot == NULL) {
+ iter->node = rdatree_walk(tree, iter->node);
+
+ if (iter->node != NULL)
+ iter->slot = rdatree_node_find(iter->node, 0);
+ }
+
+ if (iter->slot == NULL)
+ return NULL;
+
+ return *iter->slot;
+}
+
+void rdatree_remove_all(struct rdatree *tree)
+{
+ struct rdatree_node *node, *next;
+ int height;
+
+ height = tree->height;
+
+ if (height == 0) {
+ tree->root = NULL;
+ return;
+ }
+
+ node = rdatree_walk(tree, NULL);
+
+ do {
+ next = rdatree_walk(tree, node);
+ node->nr_slots = 0;
+ rdatree_cleanup(tree, node);
+ node = next;
+ } while (node != NULL);
+}
diff --git a/rdatree.h b/rdatree.h
new file mode 100644
index 0000000..ee9e37f
--- /dev/null
+++ b/rdatree.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Radix (with allocation) tree.
+ */
+
+#ifndef _RDATREE_H
+#define _RDATREE_H
+
+/*
+ * Radix tree.
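+ *
+ * When the height is 0, the root slot directly stores the single pointer
+ * contained in the tree, if any; otherwise it points to the topmost
+ * rdatree_node.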
+ */
+struct rdatree {
+ int height;
+ void *root;
+};
+
+/*
+ * Radix tree iterator.
+ *
+ * Don't use directly - use rdatree_for_each() instead.
+ */
+struct rdatree_iter {
+ void *node;
+ void **slot;
+};
+
+/*
+ * Static tree initializer.
+ */
+#define RDATREE_INITIALIZER { 0, NULL }
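+
+/*
+ * For example, with an arbitrary variable name:
+ *
+ *   struct rdatree my_tree = RDATREE_INITIALIZER;
+ *
+ * which is equivalent to declaring the tree and calling rdatree_init() on it.
+ */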
+
+/*
+ * Initialize a tree.
+ */
+static inline void rdatree_init(struct rdatree *tree)
+{
+ tree->height = 0;
+ tree->root = NULL;
+}
+
+/*
+ * Initialize an iterator.
+ */
+static inline void rdatree_iter_init(struct rdatree_iter *iter)
+{
+ iter->node = NULL;
+ iter->slot = NULL;
+}
+
+/*
+ * Insert a pointer in a tree.
+ *
+ * The ptr parameter must not be null.
+ */
+int rdatree_insert(struct rdatree *tree, unsigned long key, void *ptr);
+
+/*
+ * Insert a pointer in a tree, allocating a new key for it.
+ *
+ * The ptr and keyp parameters must not be null. The newly allocated key is
+ * stored at the address pointed to by the keyp parameter.
+ */
+int rdatree_insert_alloc(struct rdatree *tree, void *ptr, unsigned long *keyp);
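+
+/*
+ * Usage sketch, with my_obj standing for any caller-owned object that keeps
+ * track of its own key:
+ *
+ *   error = rdatree_insert_alloc(&tree, my_obj, &my_obj->id);
+ *
+ *   if (error)
+ *       return error;
+ *
+ * On success, the allocated key is the lowest one not already used in the
+ * tree.
+ */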
+
+/*
+ * Remove a pointer from a tree.
+ *
+ * The matching pointer is returned if successful, null otherwise.
+ */
+void * rdatree_remove(struct rdatree *tree, unsigned long key);
+
+/*
+ * Look up a pointer in a tree.
+ *
+ * The matching pointer is returned if successful, null otherwise.
+ */
+void * rdatree_lookup(struct rdatree *tree, unsigned long key);
+
+/*
+ * Look up a slot in a tree.
+ *
+ * A slot is a pointer to a contained pointer in a tree. It can be used as
+ * a placeholder for fast replacements to avoid multiple lookups on the same
+ * key.
+ *
+ * A slot for the matching pointer is returned if successful, null otherwise.
+ *
+ * See rdatree_replace_slot().
+ */
+void ** rdatree_lookup_slot(struct rdatree *tree, unsigned long key);
+
+/*
+ * Replace a pointer in a tree.
+ *
+ * The ptr parameter must not be null. The previous pointer is returned.
+ *
+ * See rdatree_lookup_slot().
+ */
+void * rdatree_replace_slot(void **slot, void *ptr);
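+
+/*
+ * Sketch of the lookup_slot/replace_slot pattern, where old and new_ptr are
+ * caller-provided:
+ *
+ *   void **slot;
+ *
+ *   slot = rdatree_lookup_slot(&tree, key);
+ *
+ *   if (slot != NULL)
+ *       old = rdatree_replace_slot(slot, new_ptr);
+ */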
+
+/*
+ * Walk pointers in a tree.
+ *
+ * Move the iterator to the next pointer in the given tree.
+ *
+ * The next pointer is returned if there is one, null otherwise.
+ */
+void * rdatree_iter_next(struct rdatree *tree, struct rdatree_iter *iter);
+
+/*
+ * Build a loop to process all the pointers of a tree.
+ */
+#define rdatree_for_each(tree, iter, ptr) \
+for (rdatree_iter_init(iter), ptr = rdatree_iter_next(tree, iter); \
+ ptr != NULL; \
+ ptr = rdatree_iter_next(tree, iter))
+
+/*
+ * Remove all pointers from a tree.
+ *
+ * The common way to destroy a tree and its pointers is to loop over all
+ * the pointers with rdatree_for_each(), free each of them, and then call
+ * this function.
+ */
+void rdatree_remove_all(struct rdatree *tree);
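+
+/*
+ * Sketch of the destruction pattern described above, assuming the stored
+ * pointers were individually allocated with malloc():
+ *
+ *   struct rdatree_iter iter;
+ *   void *ptr;
+ *
+ *   rdatree_for_each(&tree, &iter, ptr)
+ *       free(ptr);
+ *
+ *   rdatree_remove_all(&tree);
+ */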
+
+#endif /* _RDATREE_H */
diff --git a/test/test_avltree.c b/test/test_avltree.c
new file mode 100644
index 0000000..f8fcec8
--- /dev/null
+++ b/test/test_avltree.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../hash.h"
+#include "../macros.h"
+#include "../avltree.h"
+
+#define SIZE 28
+
+struct obj {
+ struct avltree_node node;
+ int id;
+};
+
+static inline int obj_cmp_lookup(int id, struct avltree_node *node)
+{
+ struct obj *obj;
+
+ obj = avltree_entry(node, struct obj, node);
+ return id - obj->id;
+}
+
+static void print_subtree(struct avltree_node *node, int level)
+{
+ struct obj *obj;
+ char balance;
+ int i;
+
+ if (node == NULL)
+ return;
+
+ print_subtree(node->children[AVLTREE_RIGHT], level + 1);
+
+ for (i = level; i > 0; i--)
+ putchar(' ');
+
+ obj = avltree_entry(node, struct obj, node);
+
+ switch(node->parent & AVLTREE_BALANCE_MASK) {
+ case 0:
+ balance = '-';
+ break;
+ case 1:
+ balance = '0';
+ break;
+ case 2:
+ balance = '+';
+ break;
+ default:
+ printf("invalid balance for node %d\n", obj->id);
+ return;
+ }
+
+ printf("%d:%c\n", obj->id, balance);
+
+ print_subtree(node->children[AVLTREE_LEFT], level + 1);
+}
+
+static void print_tree(struct avltree *tree)
+{
+ print_subtree(tree->root, 0);
+}
+
+static int get_id(int i)
+{
+ return hash_int32(i, 6);
+}
+
+int main(int argc, char *argv[])
+{
+ struct avltree tree;
+ struct avltree_node *node, *tmp;
+ struct obj *obj;
+ unsigned long slot;
+ int i, id;
+
+ (void)argc;
+ (void)argv;
+
+ avltree_init(&tree);
+
+ for (i = 0; i < SIZE; i++) {
+ id = get_id(i);
+ node = avltree_lookup_slot(&tree, id, obj_cmp_lookup, slot);
+
+ if (node != NULL)
+ continue;
+
+ obj = malloc(sizeof(*obj));
+ obj->id = id;
+ printf("%d ", obj->id);
+ avltree_insert_slot(&tree, slot, &obj->node);
+ }
+
+ printf("\n");
+ print_tree(&tree);
+
+ avltree_for_each_remove(&tree, node, tmp) {
+ obj = avltree_entry(node, struct obj, node);
+ printf("freeing obj %d\n", obj->id);
+ free(obj);
+ }
+
+ return 0;
+}
diff --git a/test/test_mem.c b/test/test_mem.c
new file mode 100644
index 0000000..f0aac95
--- /dev/null
+++ b/test/test_mem.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "../mem.h"
+#include "../macros.h"
+
+#define STRING "This is a test string."
+#define STRING_SIZE (STRLEN(STRING) + 1)
+
+int main(int argc, char *argv[])
+{
+ char *s;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ s = mem_alloc(STRING_SIZE);
+
+ if (s == NULL) {
+ fprintf(stderr, "unable to allocate memory\n");
+ return 1;
+ }
+
+ strcpy(s, STRING);
+ printf("string: '%s'\n", s);
+ mem_free(s, STRING_SIZE);
+
+ return 0;
+}
diff --git a/test/test_mem_cache.c b/test/test_mem_cache.c
new file mode 100644
index 0000000..9ca53f4
--- /dev/null
+++ b/test/test_mem_cache.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <pthread.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "../cpu.h"
+#include "../mem.c"
+#include "../error.c"
+#include "../avltree.c"
+
+#ifdef CONFIG_MEM_USE_PHYS
+#include "../phys.c"
+#endif /* CONFIG_MEM_USE_PHYS */
+
+#define NTHREADS 4
+#define TEST_TIME 60
+#define OBJSPERLOOP 100
+
+struct result {
+ unsigned long allocs;
+ unsigned long frees;
+} __aligned(CPU_L1_SIZE);
+
+struct obj {
+ unsigned long nr_refs;
+ char name[16];
+};
+
+static void obj_ctor(void *ptr)
+{
+ struct obj *obj;
+
+ obj = ptr;
+ obj->nr_refs = 0;
+}
+
+static struct mem_cache *obj_cache;
+static volatile int work;
+static struct result results[NTHREADS];
+
+static void * run(void *arg)
+{
+ struct obj *objs[OBJSPERLOOP];
+ struct result *result;
+ int i, id;
+
+ result = arg;
+ id = result - results;
+
+ mem_print("started");
+
+ while (work) {
+ for (i = 0; i < OBJSPERLOOP; i++) {
+ objs[i] = mem_cache_alloc(obj_cache);
+ result->allocs++;
+ }
+
+ for (i = 0; i < OBJSPERLOOP; i++) {
+ mem_cache_free(obj_cache, objs[i]);
+ result->frees++;
+ }
+ }
+
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ pthread_t threads[NTHREADS];
+ unsigned long ops;
+ int i;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ mem_info();
+
+ mem_print("Selected cache line size: %u", CPU_L1_SIZE);
+ mem_print("sizeof(pthread_mutex_t): %zu", sizeof(pthread_mutex_t));
+ mem_print("sizeof(struct mem_cpu_pool): %zu", sizeof(struct mem_cpu_pool));
+ mem_print("sizeof(union mem_bufctl): %zu", sizeof(union mem_bufctl));
+ mem_print("sizeof(struct mem_buftag): %zu", sizeof(struct mem_buftag));
+ mem_print("sizeof(struct mem_slab): %zu", sizeof(struct mem_slab));
+ mem_print("sizeof(struct mem_cache): %zu", sizeof(struct mem_cache));
+ mem_print("sizeof(struct obj): %zu", sizeof(struct obj));
+
+ obj_cache = mem_cache_create("obj", sizeof(struct obj), 0,
+ obj_ctor, NULL, 0);
+
+ memset(results, 0, sizeof(results));
+ work = 1;
+
+ for (i = 0; i < NTHREADS; i++)
+ pthread_create(&threads[i], NULL, run, &results[i]);
+
+ sleep(TEST_TIME);
+ work = 0;
+
+ for (i = 0; i < NTHREADS; i++)
+ pthread_join(threads[i], NULL);
+
+ ops = 0;
+
+ for (i = 0; i < NTHREADS; i++)
+ ops += results[i].allocs + results[i].frees;
+
+ mem_info();
+ mem_cache_info(obj_cache);
+ mem_print("total: %lu ops in %d secs", ops, TEST_TIME);
+
+ mem_cache_destroy(obj_cache);
+
+ return 0;
+}
diff --git a/test/test_mem_cache_double_free.c b/test/test_mem_cache_double_free.c
new file mode 100644
index 0000000..472c422
--- /dev/null
+++ b/test/test_mem_cache_double_free.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "../mem.h"
+#include "../macros.h"
+
+struct obj {
+ unsigned long nr_refs;
+ char name[16];
+};
+
+static void obj_ctor(void *ptr)
+{
+ struct obj *obj;
+
+ obj = ptr;
+ obj->nr_refs = 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct mem_cache *obj_cache;
+ struct obj *obj;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ obj_cache = mem_cache_create("obj", sizeof(struct obj), 0,
+ obj_ctor, NULL, MEM_CACHE_VERIFY);
+
+ printf("trying normal alloc+free:\n");
+ obj = mem_cache_alloc(obj_cache);
+ mem_cache_free(obj_cache, obj);
+
+ mem_cache_info(obj_cache);
+
+ printf("trying alloc+double free:\n");
+ obj = mem_cache_alloc(obj_cache);
+ mem_cache_free(obj_cache, obj);
+ mem_cache_free(obj_cache, obj);
+
+ printf("done\n");
+
+ return 0;
+}
diff --git a/test/test_mem_cache_invalid_free.c b/test/test_mem_cache_invalid_free.c
new file mode 100644
index 0000000..85f660a
--- /dev/null
+++ b/test/test_mem_cache_invalid_free.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "../mem.h"
+#include "../macros.h"
+
+struct obj {
+ unsigned long nr_refs;
+ char name[16];
+};
+
+static void obj_ctor(void *ptr)
+{
+ struct obj *obj;
+
+ obj = ptr;
+ obj->nr_refs = 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct mem_cache *obj_cache;
+ struct obj *obj;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ obj_cache = mem_cache_create("obj", sizeof(struct obj), 0,
+ obj_ctor, NULL, MEM_CACHE_VERIFY);
+
+ printf("trying normal alloc+free:\n");
+ obj = mem_cache_alloc(obj_cache);
+ mem_cache_free(obj_cache, obj);
+
+ mem_cache_info(obj_cache);
+
+ printf("trying alloc+invalid free:\n");
+ obj = mem_cache_alloc(obj_cache);
+ mem_cache_free(obj_cache, (char *)obj + 1);
+
+ printf("done\n");
+
+ return 0;
+}
diff --git a/test/test_mem_cache_write_beyond.c b/test/test_mem_cache_write_beyond.c
new file mode 100644
index 0000000..1519d43
--- /dev/null
+++ b/test/test_mem_cache_write_beyond.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "../mem.h"
+#include "../macros.h"
+
+struct obj {
+ unsigned long nr_refs;
+ char name[16];
+};
+
+static void obj_ctor(void *ptr)
+{
+ struct obj *obj;
+
+ obj = ptr;
+ obj->nr_refs = 0;
+}
+
+static void obj_print(struct obj *obj, size_t size)
+{
+ unsigned char *ptr, *end;
+
+ printf("buffer content: ");
+
+ for (ptr = (unsigned char *)obj, end = ptr + size; ptr < end; ptr++)
+ printf("%02x", *ptr);
+
+ printf("\n");
+}
+
+int main(int argc, char *argv[])
+{
+ struct mem_cache *obj_cache;
+ struct obj *obj;
+ size_t buf_size;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ obj_cache = mem_cache_create("obj", sizeof(struct obj), 0,
+ obj_ctor, NULL, MEM_CACHE_VERIFY);
+ mem_cache_info(obj_cache);
+
+ printf("doing normal alloc:\n");
+ obj = mem_cache_alloc(obj_cache);
+
+ printf("writing beyond object boundary:\n");
+ strcpy(obj->name, "invalid write 000000");
+ buf_size = P2ROUND(sizeof(*obj), 8);
+ obj_print(obj, buf_size + (sizeof(unsigned long) * 2));
+
+ printf("doing normal free:\n");
+ mem_cache_free(obj_cache, obj);
+
+ printf("done\n");
+
+ return 0;
+}
diff --git a/test/test_mem_cache_write_buftag.c b/test/test_mem_cache_write_buftag.c
new file mode 100644
index 0000000..88ba716
--- /dev/null
+++ b/test/test_mem_cache_write_buftag.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "../mem.h"
+#include "../macros.h"
+
+struct obj {
+ unsigned long nr_refs;
+ char name[16];
+};
+
+static void obj_ctor(void *ptr)
+{
+ struct obj *obj;
+
+ obj = ptr;
+ obj->nr_refs = 0;
+}
+
+static void obj_print(struct obj *obj, size_t size)
+{
+ unsigned char *ptr, *end;
+
+ printf("buffer content: ");
+
+ for (ptr = (unsigned char *)obj, end = ptr + size; ptr < end; ptr++)
+ printf("%02x", *ptr);
+
+ printf("\n");
+}
+
+int main(int argc, char *argv[])
+{
+ struct mem_cache *obj_cache;
+ struct obj *obj;
+ unsigned long *ptr;
+ size_t buf_size;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ obj_cache = mem_cache_create("obj", sizeof(struct obj), 0,
+ obj_ctor, NULL, MEM_CACHE_VERIFY);
+ mem_cache_info(obj_cache);
+
+ printf("doing normal alloc:\n");
+ obj = mem_cache_alloc(obj_cache);
+
+ printf("writing garbage in buftag:\n");
+ buf_size = P2ROUND(sizeof(*obj), 8);
+ ptr = (void *)obj + buf_size + sizeof(unsigned long);
+ *ptr = 0xed0000a1;
+ obj_print(obj, buf_size + (sizeof(unsigned long) * 2));
+
+ printf("doing normal free:\n");
+ mem_cache_free(obj_cache, obj);
+
+ printf("done\n");
+
+ return 0;
+}
diff --git a/test/test_mem_cache_write_free.c b/test/test_mem_cache_write_free.c
new file mode 100644
index 0000000..0850faf
--- /dev/null
+++ b/test/test_mem_cache_write_free.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "../mem.h"
+#include "../macros.h"
+
+struct obj {
+ unsigned long nr_refs;
+ char name[16];
+};
+
+static void obj_ctor(void *ptr)
+{
+ struct obj *obj;
+
+ obj = ptr;
+ obj->nr_refs = 0;
+}
+
+static void obj_print(struct obj *obj)
+{
+ unsigned char *ptr, *end;
+
+ printf("buffer content: ");
+
+ for (ptr = (unsigned char *)obj, end = ptr + sizeof(*obj);
+ ptr < end;
+ ptr++)
+ printf("%02x", *ptr);
+
+ printf("\n");
+}
+
+int main(int argc, char *argv[])
+{
+ struct mem_cache *obj_cache;
+ struct obj *obj;
+ cpu_set_t cpu_set;
+
+ (void)argc;
+ (void)argv;
+
+ printf("binding to CPU 0\n");
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+
+ mem_init();
+
+ obj_cache = mem_cache_create("obj", sizeof(struct obj), 0,
+ obj_ctor, NULL, MEM_CACHE_VERIFY);
+
+ printf("doing normal alloc+free:\n");
+ obj = mem_cache_alloc(obj_cache);
+ mem_cache_free(obj_cache, obj);
+
+ mem_cache_info(obj_cache);
+
+ obj_print(obj);
+
+ printf("doing write on free object:\n");
+ memset((void *)obj + 3, 0x89, 5);
+
+ obj_print(obj);
+
+ printf("trying normal alloc+free on same CPU (should fail):\n");
+
+ obj = mem_cache_alloc(obj_cache);
+ mem_cache_free(obj_cache, obj);
+
+ printf("done\n");
+
+ return 0;
+}
diff --git a/test/test_mem_offbyone.c b/test/test_mem_offbyone.c
new file mode 100644
index 0000000..084bdb5
--- /dev/null
+++ b/test/test_mem_offbyone.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "../mem.h"
+
+#define BUFSIZE 200
+
+int main(int argc, char *argv[])
+{
+ char *s;
+
+ (void)argc;
+ (void)argv;
+
+ mem_init();
+
+ printf("allocating memory:\n");
+ s = mem_alloc(BUFSIZE);
+
+ if (s == NULL) {
+ fprintf(stderr, "unable to allocate memory\n");
+ return 1;
+ }
+
+ printf("writing beyond end of buffer:\n");
+ memset(s, 'a', BUFSIZE + 1);
+
+ printf("releasing buffer, should fail if CONFIG_MEM_VERIFY defined\n");
+ mem_free(s, BUFSIZE);
+
+ return 0;
+}
diff --git a/test/test_phys.c b/test/test_phys.c
new file mode 100644
index 0000000..fe94b43
--- /dev/null
+++ b/test/test_phys.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "../phys.c"
+
+int main(int argc, char *argv[])
+{
+ struct phys_page *page;
+
+ (void)argc;
+ (void)argv;
+
+ phys_init();
+
+ phys_info();
+ printf("sizeof(struct phys_cpu_pool) = %zu\n",
+ sizeof(struct phys_cpu_pool));
+ printf("sizeof(struct phys_free_list) = %zu\n",
+ sizeof(struct phys_free_list));
+ printf("sizeof(struct phys_page): %zu\n", sizeof(struct phys_page));
+ printf("sizeof(struct phys_seg): %zu\n", sizeof(struct phys_seg));
+ printf("allocating one page\n");
+ page = phys_alloc_pages(1);
+
+ if (page == NULL) {
+ fprintf(stderr, "unable to allocate memory\n");
+ return 1;
+ }
+
+ phys_info();
+
+ printf("freeing the allocated page\n");
+ phys_free_pages(page, 1);
+ phys_info();
+
+ return 0;
+}
diff --git a/test/test_rbtree.c b/test/test_rbtree.c
new file mode 100644
index 0000000..e379fb8
--- /dev/null
+++ b/test/test_rbtree.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../hash.h"
+#include "../macros.h"
+#include "../rbtree.h"
+
+#define SIZE 28
+
+struct obj {
+ struct rbtree_node node;
+ int id;
+};
+
+static inline int obj_cmp_lookup(int id, struct rbtree_node *node)
+{
+ struct obj *obj;
+
+ obj = rbtree_entry(node, struct obj, node);
+ return id - obj->id;
+}
+
+static void print_subtree(struct rbtree_node *node, int level)
+{
+ struct obj *obj;
+ char color;
+ int i;
+
+ if (node == NULL)
+ return;
+
+ print_subtree(node->children[RBTREE_RIGHT], level + 1);
+
+ for (i = level; i > 0; i--)
+ putchar(' ');
+
+ obj = rbtree_entry(node, struct obj, node);
+ color = (node->parent & RBTREE_COLOR_MASK) ? 'b' : 'r';
+ printf("%d:%c\n", obj->id, color);
+
+ print_subtree(node->children[RBTREE_LEFT], level + 1);
+}
+
+static void print_tree(struct rbtree *tree)
+{
+ print_subtree(tree->root, 0);
+}
+
+static int get_id(int i)
+{
+ return hash_int32(i, 6);
+}
+
+int main(int argc, char *argv[])
+{
+ struct rbtree tree;
+ struct rbtree_node *node, *tmp;
+ struct obj *obj;
+ unsigned long slot;
+ int i, id;
+
+ (void)argc;
+ (void)argv;
+
+ rbtree_init(&tree);
+
+ for (i = 0; i < SIZE; i++) {
+ id = get_id(i);
+ node = rbtree_lookup_slot(&tree, id, obj_cmp_lookup, slot);
+
+ if (node != NULL)
+ continue;
+
+ obj = malloc(sizeof(*obj));
+ obj->id = id;
+ printf("%d ", obj->id);
+ rbtree_insert_slot(&tree, slot, &obj->node);
+ }
+
+ printf("\n");
+ print_tree(&tree);
+
+ rbtree_for_each_remove(&tree, node, tmp) {
+ obj = rbtree_entry(node, struct obj, node);
+ printf("freeing obj %d\n", obj->id);
+ free(obj);
+ }
+
+ return 0;
+}
diff --git a/test/test_rdatree.c b/test/test_rdatree.c
new file mode 100644
index 0000000..4ecd64d
--- /dev/null
+++ b/test/test_rdatree.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (c) 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include "../macros.h"
+#include "../rdatree.c"
+
+#define TITLE(str) printf("%s: %s\n", __func__, str)
+
+#if RDATREE_RADIX < 6
+#define BM_FORMAT "%lx"
+#elif RDATREE_RADIX == 6 /* RDATREE_RADIX < 6 */
+#ifdef __LP64__
+#define BM_FORMAT "%lx"
+#else /* __LP64__ */
+#define BM_FORMAT "%llx"
+#endif /* __LP64__ */
+#endif /* RDATREE_RADIX < 6 */
+
+struct obj {
+ unsigned long id;
+};
+
+static struct obj * obj_create(unsigned long id)
+{
+ struct obj *obj;
+
+ obj = malloc(sizeof(*obj));
+ assert(obj != NULL);
+ obj->id = id;
+ return obj;
+}
+
+static void obj_destroy(struct obj *obj)
+{
+ free(obj);
+}
+
+static void print_subtree(struct rdatree_node *node, int height, size_t index,
+ size_t level);
+
+static void print_value(void *ptr, size_t index, size_t level)
+{
+ struct obj *obj;
+ int i;
+
+ if (ptr == NULL)
+ return;
+
+ obj = ptr;
+
+ for (i = level; i > 0; i--)
+ putchar(' ');
+
+ printf("%zu:%lu\n", index, obj->id);
+}
+
+static void print_values(struct rdatree_node *node, size_t index, size_t level)
+{
+ size_t i;
+
+ for (i = level; i > 0; i--)
+ putchar(' ');
+
+ printf("%zu:n (bm: " BM_FORMAT ")\n", index, node->alloc_bm);
+
+ for (i = 0; i < ARRAY_SIZE(node->slots); i++)
+ print_value(node->slots[i], i, level + 1);
+}
+
+static void print_node(struct rdatree_node *node, int height, size_t index,
+ size_t level)
+{
+ size_t i;
+
+ for (i = level; i > 0; i--)
+ putchar(' ');
+
+ printf("%zu:n (bm: " BM_FORMAT ")\n", index, node->alloc_bm);
+
+ for (i = 0; i < ARRAY_SIZE(node->slots); i++)
+ print_subtree(node->slots[i], height - 1, i, level + 1);
+}
+
+static void print_subtree(struct rdatree_node *node, int height, size_t index,
+ size_t level)
+{
+ if (node == NULL)
+ return;
+
+ if (height == 1)
+ print_values(node, index, level);
+ else
+ print_node(node, height, index, level);
+}
+
+static void print_tree(struct rdatree *tree)
+{
+ if (tree->height == 0)
+ print_value(tree->root, 0, 0);
+ else
+ print_subtree(tree->root, tree->height, 0, 0);
+}
+
+static void destroy_tree(struct rdatree *tree)
+{
+ struct rdatree_iter iter;
+ struct obj *obj;
+
+ rdatree_for_each(tree, &iter, obj)
+ obj_destroy(obj);
+
+ rdatree_remove_all(tree);
+}
+
+static void test_1(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ int error;
+
+ TITLE("insert 0");
+
+ rdatree_init(&tree);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ print_tree(&tree);
+ destroy_tree(&tree);
+}
+
+static void test_2(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ int error;
+
+ TITLE("insert 1");
+
+ rdatree_init(&tree);
+ obj = obj_create(1);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ print_tree(&tree);
+ destroy_tree(&tree);
+}
+
+static void test_3(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ int error;
+
+ TITLE("insert 0 and 1");
+
+ rdatree_init(&tree);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ obj = obj_create(1);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ print_tree(&tree);
+ destroy_tree(&tree);
+}
+
+static void test_4(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ int error;
+
+ TITLE("insert 1 and 0");
+
+ rdatree_init(&tree);
+ obj = obj_create(1);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ print_tree(&tree);
+ destroy_tree(&tree);
+}
+
+static void test_5(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ int error;
+
+ TITLE("insert 0 and 4096");
+
+ rdatree_init(&tree);
+ obj = obj_create(4096);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ print_tree(&tree);
+ destroy_tree(&tree);
+}
+
+static void test_6(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ int error;
+
+ TITLE("insert 4096 and 0");
+
+ rdatree_init(&tree);
+ obj = obj_create(4096);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ print_tree(&tree);
+ destroy_tree(&tree);
+}
+
+static void test_7(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ void *ptr;
+ int error;
+
+ TITLE("insert and remove 0");
+
+ rdatree_init(&tree);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ ptr = rdatree_remove(&tree, obj->id);
+ assert(ptr == obj);
+ obj_destroy(obj);
+ print_tree(&tree);
+}
+
+static void test_8(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ void *ptr;
+ int error;
+
+ TITLE("insert and remove 4096");
+
+ rdatree_init(&tree);
+ obj = obj_create(4096);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ ptr = rdatree_remove(&tree, obj->id);
+ assert(ptr == obj);
+ obj_destroy(obj);
+ print_tree(&tree);
+}
+
+static void test_9(void)
+{
+ struct rdatree tree;
+ struct obj *obj1, *obj2;
+ void *ptr;
+ int error;
+
+ TITLE("insert 0 and 4096 and remove in reverse order");
+
+ rdatree_init(&tree);
+ obj1 = obj_create(0);
+ error = rdatree_insert(&tree, obj1->id, obj1);
+ assert(!error);
+ obj2 = obj_create(4096);
+ error = rdatree_insert(&tree, obj2->id, obj2);
+ assert(!error);
+ ptr = rdatree_remove(&tree, obj2->id);
+ assert(ptr == obj2);
+ obj_destroy(obj2);
+ ptr = rdatree_remove(&tree, obj1->id);
+ assert(ptr == obj1);
+ obj_destroy(obj1);
+ print_tree(&tree);
+}
+
+static void test_10(void)
+{
+ struct rdatree tree;
+ struct obj *obj1, *obj2;
+ void *ptr;
+ int error;
+
+ TITLE("insert 0 and 4096 and remove in same order");
+
+ rdatree_init(&tree);
+ obj1 = obj_create(0);
+ error = rdatree_insert(&tree, obj1->id, obj1);
+ assert(!error);
+ obj2 = obj_create(4096);
+ error = rdatree_insert(&tree, obj2->id, obj2);
+ assert(!error);
+ ptr = rdatree_remove(&tree, obj1->id);
+ assert(ptr == obj1);
+ obj_destroy(obj1);
+ ptr = rdatree_remove(&tree, obj2->id);
+ assert(ptr == obj2);
+ obj_destroy(obj2);
+ print_tree(&tree);
+}
+
+static void test_11(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert [0..4096] and remove in reverse order");
+
+ rdatree_init(&tree);
+
+ for (i = 0; i <= 4096; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ /* i is unsigned: decrementing past 0 wraps above 4096 and ends the loop */
+ for (i = 4096; i <= 4096; i--) {
+ obj = rdatree_remove(&tree, i);
+ obj_destroy(obj);
+ }
+
+ print_tree(&tree);
+}
+
+static void test_12(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert [0..4096] and remove in same order");
+
+ rdatree_init(&tree);
+
+ for (i = 0; i <= 4096; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ for (i = 0; i <= 4096; i++) {
+ obj = rdatree_remove(&tree, i);
+ obj_destroy(obj);
+ }
+
+ print_tree(&tree);
+}
+
+static void test_13(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("allocate");
+
+ rdatree_init(&tree);
+ obj = obj_create(0);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == 0);
+ print_tree(&tree);
+ i = obj->id;
+ obj = rdatree_lookup(&tree, i);
+ assert(obj->id == i);
+ destroy_tree(&tree);
+}
+
+static void test_14(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert 0, allocate");
+
+ rdatree_init(&tree);
+ obj = obj_create(0);
+ error = rdatree_insert(&tree, obj->id, obj);
+ assert(!error);
+ obj = obj_create(0);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == 1);
+ print_tree(&tree);
+ i = obj->id;
+ obj = rdatree_lookup(&tree, i);
+ assert(obj->id == i);
+ destroy_tree(&tree);
+}
+
+static void test_15(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert [0..4095], remove 0, allocate");
+
+ rdatree_init(&tree);
+
+ for (i = 0; i < 4096; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ obj = rdatree_remove(&tree, 0);
+ assert(obj->id == 0);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == 0);
+ destroy_tree(&tree);
+}
+
+static void test_16(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert [0..4095], remove 1, allocate");
+
+ rdatree_init(&tree);
+
+ for (i = 0; i < 4096; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ obj = rdatree_remove(&tree, 1);
+ assert(obj->id == 1);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == 1);
+ destroy_tree(&tree);
+}
+
+static void test_17(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert [0..63] and [128..191], allocate x65");
+
+ rdatree_init(&tree);
+
+ for (i = 0; i < 64; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ for (i = 128; i < 192; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ for (i = 64; i < 128; i++) {
+ obj = obj_create(0);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == i);
+ }
+
+ obj = obj_create(0);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == 192);
+ destroy_tree(&tree);
+}
+
+static void test_18(void)
+{
+ struct rdatree tree;
+ struct obj *obj;
+ unsigned long i;
+ int error;
+
+ TITLE("insert [0..4095], allocate");
+
+ rdatree_init(&tree);
+
+ for (i = 0; i < 4096; i++) {
+ obj = obj_create(i);
+ error = rdatree_insert(&tree, i, obj);
+ assert(!error);
+ }
+
+ obj = obj_create(0);
+ error = rdatree_insert_alloc(&tree, obj, &obj->id);
+ assert(!error);
+ assert(obj->id == 4096);
+ destroy_tree(&tree);
+}
+
+static void test_19(void)
+{
+ struct rdatree tree;
+ struct obj *obj1, *obj2, *tmp;
+ void **slot;
+ int error;
+
+ TITLE("insert 0, replace");
+
+ rdatree_init(&tree);
+ obj1 = obj_create(0);
+ error = rdatree_insert(&tree, 0, obj1);
+ assert(!error);
+ slot = rdatree_lookup_slot(&tree, 0);
+ assert(slot != NULL);
+ obj2 = obj_create(0);
+ tmp = rdatree_replace_slot(slot, obj2);
+ assert(obj1 == tmp);
+ obj_destroy(obj1);
+ print_tree(&tree);
+ tmp = rdatree_lookup(&tree, 0);
+ assert(obj2 == tmp);
+ destroy_tree(&tree);
+}
+
+static void test_20(void)
+{
+ struct rdatree tree;
+ struct obj *obj1, *obj2, *tmp;
+ void **slot;
+ int error;
+
+ TITLE("insert 4096, replace");
+
+ rdatree_init(&tree);
+ obj1 = obj_create(4096);
+ error = rdatree_insert(&tree, 4096, obj1);
+ assert(!error);
+ slot = rdatree_lookup_slot(&tree, 4096);
+ assert(slot != NULL);
+ obj2 = obj_create(4096);
+ tmp = rdatree_replace_slot(slot, obj2);
+ assert(obj1 == tmp);
+ obj_destroy(obj1);
+ print_tree(&tree);
+ tmp = rdatree_lookup(&tree, 4096);
+ assert(obj2 == tmp);
+ destroy_tree(&tree);
+}
+
+int main(int argc, char *argv[])
+{
+ (void)argc;
+ (void)argv;
+
+ test_1();
+ test_2();
+ test_3();
+ test_4();
+ test_5();
+ test_6();
+ test_7();
+ test_8();
+ test_9();
+ test_10();
+ test_11();
+ test_12();
+ test_13();
+ test_14();
+ test_15();
+ test_16();
+ test_17();
+ test_18();
+ test_19();
+ test_20();
+ return 0;
+}
diff --git a/test/test_xprintf.c b/test/test_xprintf.c
new file mode 100644
index 0000000..84a4176
--- /dev/null
+++ b/test/test_xprintf.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "../macros.h"
+#include "../xprintf.h"
+
+#define TEST_PRINTF(format, ...) \
+MACRO_BEGIN \
+ char stra[256], strb[256]; \
+ int la, lb; \
+ la = snprintf(stra, 256, format, ## __VA_ARGS__); \
+ printf(" printf: %s", stra); \
+ lb = xsnprintf(strb, 256, format, ## __VA_ARGS__); \
+ xprintf("xprintf: %s", stra); \
+ assert(la == lb); \
+ assert(strcmp(stra, strb) == 0); \
+MACRO_END
+
+int main(int argc, char *argv[])
+{
+ (void)argc;
+ (void)argv;
+
+#define FORMAT "%c"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 'a');
+#undef FORMAT
+
+#define FORMAT "%8c"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 'a');
+#undef FORMAT
+
+#define FORMAT "%-8c"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 'a');
+#undef FORMAT
+
+#define FORMAT "%.s"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "12345");
+#undef FORMAT
+
+#define FORMAT "%.3s"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "12345");
+#undef FORMAT
+
+#define FORMAT "%4.3s"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "12345");
+#undef FORMAT
+
+#define FORMAT "%-4.3s"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "12345");
+#undef FORMAT
+
+#define FORMAT "%3.4s"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "12345");
+#undef FORMAT
+
+#define FORMAT "%-3.4s"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "12345");
+#undef FORMAT
+
+#define FORMAT "%#o"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%#x"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%#X"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%08d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%-8d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%-08d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%0-8d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "% d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%+d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%+ d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%12d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%*d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 12, 123);
+#undef FORMAT
+
+#define FORMAT "%.12d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%.012d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%.*d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 12, 123);
+#undef FORMAT
+
+#define FORMAT "%.d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%.*d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -12, 123);
+#undef FORMAT
+
+#define FORMAT "%.4d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%5.4d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%4.5d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0);
+#undef FORMAT
+
+#define FORMAT "%.0d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0);
+#undef FORMAT
+
+#define FORMAT "%.0o"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0);
+#undef FORMAT
+
+#define FORMAT "%.0x"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0);
+#undef FORMAT
+
+#define FORMAT "%1.0d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0);
+#undef FORMAT
+
+#define FORMAT "%08.0d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%08d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%08d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%8d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%8d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%.8d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%.80d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%-80d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%80d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%80.40d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%-+80.40d"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%+x"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, -123);
+#undef FORMAT
+
+#define FORMAT "%#x"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%#o"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 123);
+#undef FORMAT
+
+#define FORMAT "%p"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, "123");
+#undef FORMAT
+
+#define FORMAT "%#lx"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0xdeadbeefL);
+#undef FORMAT
+
+#define FORMAT "%zd"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, (size_t)-123);
+#undef FORMAT
+
+#define FORMAT "%#llx"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0xdeadbeefbadcafeLL);
+#undef FORMAT
+
+#define FORMAT "%llo"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT, 0xffffffffffffffffLL);
+#undef FORMAT
+
+#define FORMAT "%%"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT);
+#undef FORMAT
+
+#define FORMAT "%y"
+ TEST_PRINTF("%s: '" FORMAT "'\n", FORMAT);
+#undef FORMAT
+
+ char stra[10], strb[10];
+ int la, lb;
+ la = snprintf(stra, sizeof(stra), "%s", "123456789a");
+ printf(" printf: %s\n", stra);
+ lb = xsnprintf(strb, sizeof(strb), "%s", "123456789a");
+ xprintf("xprintf: %s\n", stra);
+ assert(la == lb);
+ assert(strncmp(stra, strb, 10) == 0);
+
+#define FORMAT "12%n3%#08x4%n5"
+ int lc, ld;
+ sprintf(stra, FORMAT, &la, 123, &lb);
+ printf(" printf: la: %d, lb: %d\n", la, lb);
+ xsprintf(strb, FORMAT, &lc, 123, &ld);
+ xprintf("xprintf: lc: %d, ld: %d\n", lc, ld);
+ assert(la == lc);
+ assert(lb == ld);
+#undef FORMAT
+
+ la = snprintf(NULL, 0, "%s", "123");
+ printf(" printf: %d\n", la);
+ lb = xsnprintf(NULL, 0, "%s", "123");
+ xprintf("xprintf: %d\n", lb);
+ assert(la == lb);
+
+ return 0;
+}
diff --git a/xprintf.c b/xprintf.c
new file mode 100644
index 0000000..384fab4
--- /dev/null
+++ b/xprintf.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include "xprintf.h"
+
+/*
+ * Formatting flags.
+ *
+ * FORMAT_LOWER must be 0x20 because it is OR'd with digit characters, e.g.
+ * '0': 0x30 | 0x20 => 0x30 ('0')
+ * 'A': 0x41 | 0x20 => 0x61 ('a')
+ */
+#define FORMAT_ALT_FORM 0x01 /* "Alternate form" */
+#define FORMAT_ZERO_PAD 0x02 /* Zero padding on the left */
+#define FORMAT_LEFT_JUSTIFY 0x04 /* Align text on the left */
+#define FORMAT_BLANK 0x08 /* Blank space before positive number */
+#define FORMAT_SIGN 0x10 /* Always place a sign (either + or -) */
+#define FORMAT_LOWER 0x20 /* To lowercase (for %x) */
+#define FORMAT_CONV_SIGNED 0x40 /* Format specifies signed conversion */
+
+enum {
+ MODIFIER_NONE,
+ MODIFIER_CHAR,
+ MODIFIER_SHORT,
+ MODIFIER_LONG,
+ MODIFIER_LONGLONG,
+ MODIFIER_PTR, /* Used only for %p */
+ MODIFIER_SIZE,
+ MODIFIER_PTRDIFF
+};
+
+enum {
+ SPECIFIER_INVALID,
+ SPECIFIER_INT,
+ SPECIFIER_CHAR,
+ SPECIFIER_STR,
+ SPECIFIER_NRCHARS,
+ SPECIFIER_PERCENT
+};
+
+/*
+ * Size for the temporary number buffer. The minimum base is 8 so 3 bits
+ * are consumed per digit. Add one to round up. The conversion algorithm
+ * doesn't use the null byte.
+ */
+#define MAX_NUM_SIZE (((sizeof(unsigned long long) * CHAR_BIT) / 3) + 1)
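+
+/*
+ * For example, on targets where unsigned long long is 64 bits wide, this
+ * gives ((64 / 3) + 1) = 22 characters, enough for the 22 octal digits of
+ * ULLONG_MAX (1777777777777777777777).
+ */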
+
+/*
+ * Special size for xvsnprintf(), used by xsprintf()/xvsprintf() when the
+ * buffer size is unknown.
+ */
+#define XPRINT_NOLIMIT ((size_t)-1)
+
+/*
+ * Size of the static buffer used by xprintf()/xvprintf().
+ */
+#define XPRINT_BUFSIZE 1024
+
+static char xprint_buffer[XPRINT_BUFSIZE];
+static pthread_mutex_t xprint_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static const char digits[] = "0123456789ABCDEF";
+
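+/*
+ * Store a character if it fits in the buffer, but always advance the
+ * cursor so that the caller can compute the length of the untruncated
+ * output.
+ */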
+static inline char * xputchar(char *str, char *end, char c)
+{
+ if (str < end)
+ *str = c;
+
+ str++;
+
+ return str;
+}
+
+static inline int xisdigit(char c)
+{
+ return (c >= '0') && (c <= '9');
+}
+
+int xprintf(const char *format, ...)
+{
+ va_list ap;
+ int length;
+
+ va_start(ap, format);
+ length = xvprintf(format, ap);
+ va_end(ap);
+
+ return length;
+}
+
+int xvprintf(const char *format, va_list ap)
+{
+ size_t size;
+ int length;
+
+ pthread_mutex_lock(&xprint_mutex);
+ length = xvsnprintf(xprint_buffer, sizeof(xprint_buffer), format, ap);
+ size = ((unsigned int)length >= sizeof(xprint_buffer))
+ ? sizeof(xprint_buffer) - 1
+ : (unsigned int)length;
+ fwrite(xprint_buffer, 1, size, stdout);
+ pthread_mutex_unlock(&xprint_mutex);
+
+ return length;
+}
+
+int xsprintf(char *str, const char *format, ...)
+{
+ va_list ap;
+ int length;
+
+ va_start(ap, format);
+ length = xvsprintf(str, format, ap);
+ va_end(ap);
+
+ return length;
+}
+
+int xvsprintf(char *str, const char *format, va_list ap)
+{
+ return xvsnprintf(str, XPRINT_NOLIMIT, format, ap);
+}
+
+int xsnprintf(char *str, size_t size, const char *format, ...)
+{
+ va_list ap;
+ int length;
+
+ va_start(ap, format);
+ length = xvsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return length;
+}
+
+int xvsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ unsigned long long n;
+ int i, len, found, flags, width, precision, modifier, specifier, shift;
+ unsigned char r, base, mask;
+ char c, *s, *start, *end, sign, tmp[MAX_NUM_SIZE];
+
+ start = str;
+
+ if (size == 0)
+ end = NULL;
+ else if (size == XPRINT_NOLIMIT)
+ end = (char *)-1;
+ else
+ end = start + size - 1;
+
+ while ((c = *format) != '\0') {
+ if (c != '%') {
+ str = xputchar(str, end, c);
+ format++;
+ continue;
+ }
+
+ /* Flags */
+
+ found = 1;
+ flags = 0;
+
+ do {
+ format++;
+ c = *format;
+
+ switch (c) {
+ case '#':
+ flags |= FORMAT_ALT_FORM;
+ break;
+ case '0':
+ flags |= FORMAT_ZERO_PAD;
+ break;
+ case '-':
+ flags |= FORMAT_LEFT_JUSTIFY;
+ break;
+ case ' ':
+ flags |= FORMAT_BLANK;
+ break;
+ case '+':
+ flags |= FORMAT_SIGN;
+ break;
+ default:
+ found = 0;
+ break;
+ }
+ } while (found);
+
+ /* Width */
+
+ if (xisdigit(c)) {
+ width = 0;
+
+ while (xisdigit(c)) {
+ width = width * 10 + (c - '0');
+ format++;
+ c = *format;
+ }
+ } else if (c == '*') {
+ width = va_arg(ap, int);
+
+ if (width < 0) {
+ flags |= FORMAT_LEFT_JUSTIFY;
+ width = -width;
+ }
+
+ format++;
+ c = *format;
+ } else {
+ width = 0;
+ }
+
+ /* Precision */
+
+ if (c == '.') {
+ format++;
+ c = *format;
+
+ if (xisdigit(c)) {
+ precision = 0;
+
+ while (xisdigit(c)) {
+ precision = precision * 10 + (c - '0');
+ format++;
+ c = *format;
+ }
+ } else if (c == '*') {
+ precision = va_arg(ap, int);
+
+ if (precision < 0)
+ precision = 0;
+
+ format++;
+ c = *format;
+ } else {
+ precision = 0;
+ }
+ } else {
+ /* precision is >= 0 only if explicit */
+ precision = -1;
+ }
+
+ /* Length modifier */
+
+ switch (c) {
+ case 'h':
+ case 'l':
+ format++;
+
+ if (c == *format) {
+ modifier = (c == 'h') ? MODIFIER_CHAR : MODIFIER_LONGLONG;
+ goto skip_modifier;
+ } else {
+ modifier = (c == 'h') ? MODIFIER_SHORT : MODIFIER_LONG;
+ c = *format;
+ }
+
+ break;
+ case 'z':
+ modifier = MODIFIER_SIZE;
+ goto skip_modifier;
+ case 't':
+ modifier = MODIFIER_PTRDIFF;
+skip_modifier:
+ format++;
+ c = *format;
+ break;
+ default:
+ modifier = MODIFIER_NONE;
+ break;
+ }
+
+ /* Specifier */
+
+ switch (c) {
+ case 'd':
+ case 'i':
+ flags |= FORMAT_CONV_SIGNED;
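+            /* fall through */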
+ case 'u':
+ base = 10;
+ goto integer;
+ case 'o':
+ base = 8;
+ goto integer;
+ case 'p':
+ flags |= FORMAT_ALT_FORM;
+ modifier = MODIFIER_PTR;
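+            /* fall through */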
+ case 'x':
+ flags |= FORMAT_LOWER;
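+            /* fall through */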
+ case 'X':
+ base = 16;
+integer:
+ specifier = SPECIFIER_INT;
+ break;
+ case 'c':
+ specifier = SPECIFIER_CHAR;
+ break;
+ case 's':
+ specifier = SPECIFIER_STR;
+ break;
+ case 'n':
+ specifier = SPECIFIER_NRCHARS;
+ break;
+ case '%':
+ specifier = SPECIFIER_PERCENT;
+ break;
+ default:
+ specifier = SPECIFIER_INVALID;
+ break;
+ }
+
+ /* Output */
+
+ switch (specifier) {
+ case SPECIFIER_INT:
+ switch (modifier) {
+ case MODIFIER_CHAR:
+ if (flags & FORMAT_CONV_SIGNED)
+ n = (signed char)va_arg(ap, int);
+ else
+ n = (unsigned char)va_arg(ap, int);
+ break;
+ case MODIFIER_SHORT:
+ if (flags & FORMAT_CONV_SIGNED)
+ n = (short)va_arg(ap, int);
+ else
+ n = (unsigned short)va_arg(ap, int);
+ break;
+ case MODIFIER_LONG:
+ if (flags & FORMAT_CONV_SIGNED)
+ n = va_arg(ap, long);
+ else
+ n = va_arg(ap, unsigned long);
+ break;
+ case MODIFIER_LONGLONG:
+ if (flags & FORMAT_CONV_SIGNED)
+ n = va_arg(ap, long long);
+ else
+ n = va_arg(ap, unsigned long long);
+ break;
+ case MODIFIER_PTR:
+ n = (unsigned long)va_arg(ap, void *);
+ break;
+ case MODIFIER_SIZE:
+ if (flags & FORMAT_CONV_SIGNED)
+ n = va_arg(ap, ssize_t);
+ else
+ n = va_arg(ap, size_t);
+ break;
+ case MODIFIER_PTRDIFF:
+ n = va_arg(ap, ptrdiff_t);
+ break;
+ default:
+ if (flags & FORMAT_CONV_SIGNED)
+ n = va_arg(ap, int);
+ else
+ n = va_arg(ap, unsigned int);
+ break;
+ }
+
+ if ((flags & FORMAT_LEFT_JUSTIFY) || (precision >= 0))
+ flags &= ~FORMAT_ZERO_PAD;
+
+ sign = 0;
+
+ if (flags & FORMAT_ALT_FORM) {
+ /* '0' for octal */
+ width--;
+
+ /* '0x' or '0X' for hexadecimal */
+ if (base == 16)
+ width--;
+ } else if (flags & FORMAT_CONV_SIGNED) {
+ if ((long long)n < 0) {
+ sign = '-';
+ width--;
+ n = -(long long)n;
+ } else if (flags & FORMAT_SIGN) {
+ /* FORMAT_SIGN must precede FORMAT_BLANK. */
+ sign = '+';
+ width--;
+ } else if (flags & FORMAT_BLANK) {
+ sign = ' ';
+ width--;
+ }
+ }
+
+ /* Conversion, in reverse order */
+
+ i = 0;
+
+ if (n == 0) {
+ if (precision != 0)
+ tmp[i++] = '0';
+ } else if (base == 10) {
+            /*
+             * Try to avoid 64-bit operations if the processor doesn't
+             * support them. Note that even when the modulus and division
+             * operators are used close to each other, the compiler may
+             * emit two calls to __udivdi3() and __umoddi3() instead of a
+             * single call to __udivmoddi3(), whereas processor divide
+             * instructions generally give both the remainder and the
+             * quotient at once, through plain or reciprocal division.
+             */
+#ifndef __LP64__
+ if (modifier == MODIFIER_LONGLONG) {
+#endif /* __LP64__ */
+ do {
+ r = n % 10;
+ n /= 10;
+ tmp[i++] = digits[r];
+ } while (n != 0);
+#ifndef __LP64__
+ } else {
+ unsigned long m;
+
+ m = (unsigned long)n;
+
+ do {
+ r = m % 10;
+ m /= 10;
+ tmp[i++] = digits[r];
+ } while (m != 0);
+ }
+#endif /* __LP64__ */
+ } else {
+ mask = base - 1;
+ shift = (base == 8) ? 3 : 4;
+
+ do {
+ r = (unsigned char)n & mask;
+ n >>= shift;
+ tmp[i++] = digits[r] | (flags & FORMAT_LOWER);
+ } while (n != 0);
+ }
+
+ if (i > precision)
+ precision = i;
+
+ width -= precision;
+
+ if (!(flags & (FORMAT_LEFT_JUSTIFY | FORMAT_ZERO_PAD)))
+ while (width-- > 0)
+ str = xputchar(str, end, ' ');
+
+ if (flags & FORMAT_ALT_FORM) {
+ str = xputchar(str, end, '0');
+
+ if (base == 16)
+ str = xputchar(str, end, 'X' | (flags & FORMAT_LOWER));
+ } else if (sign) {
+ str = xputchar(str, end, sign);
+ }
+
+ if (!(flags & FORMAT_LEFT_JUSTIFY)) {
+ c = (flags & FORMAT_ZERO_PAD) ? '0' : ' ';
+
+ while (width-- > 0)
+ str = xputchar(str, end, c);
+ }
+
+ while (i < precision--)
+ str = xputchar(str, end, '0');
+
+ while (i-- > 0)
+ str = xputchar(str, end, tmp[i]);
+
+ while (width-- > 0)
+ str = xputchar(str, end, ' ');
+
+ break;
+ case SPECIFIER_CHAR:
+ c = (unsigned char)va_arg(ap, int);
+
+ if (!(flags & FORMAT_LEFT_JUSTIFY))
+ while (--width > 0)
+ str = xputchar(str, end, ' ');
+
+ str = xputchar(str, end, c);
+
+ while (--width > 0)
+ str = xputchar(str, end, ' ');
+
+ break;
+ case SPECIFIER_STR:
+ s = va_arg(ap, char *);
+
+ if (s == NULL)
+ s = "(null)";
+
+            for (len = 0; s[len] != '\0'; len++)
+ if (len == precision)
+ break;
+
+ if (!(flags & FORMAT_LEFT_JUSTIFY))
+ while (len < width--)
+ str = xputchar(str, end, ' ');
+
+ for (i = 0; i < len; i++) {
+ str = xputchar(str, end, *s);
+ s++;
+ }
+
+ while (len < width--)
+ str = xputchar(str, end, ' ');
+
+ break;
+ case SPECIFIER_NRCHARS:
+ if (modifier == MODIFIER_CHAR) {
+ signed char *ptr = va_arg(ap, signed char *);
+ *ptr = str - start;
+ } else if (modifier == MODIFIER_SHORT) {
+ short *ptr = va_arg(ap, short *);
+ *ptr = str - start;
+ } else if (modifier == MODIFIER_LONG) {
+ long *ptr = va_arg(ap, long *);
+ *ptr = str - start;
+ } else if (modifier == MODIFIER_LONGLONG) {
+ long long *ptr = va_arg(ap, long long *);
+ *ptr = str - start;
+ } else if (modifier == MODIFIER_SIZE) {
+ ssize_t *ptr = va_arg(ap, ssize_t *);
+ *ptr = str - start;
+ } else if (modifier == MODIFIER_PTRDIFF) {
+ ptrdiff_t *ptr = va_arg(ap, ptrdiff_t *);
+ *ptr = str - start;
+ } else {
+ int *ptr = va_arg(ap, int *);
+ *ptr = str - start;
+ }
+
+ break;
+ case SPECIFIER_PERCENT:
+ case SPECIFIER_INVALID:
+ str = xputchar(str, end, '%');
+ break;
+ default:
+ break;
+ }
+
+ if (specifier != SPECIFIER_INVALID)
+ format++;
+ }
+
+ if (str < end)
+ *str = '\0';
+ else if (end != NULL)
+ *end = '\0';
+
+ return str - start;
+}
diff --git a/xprintf.h b/xprintf.h
new file mode 100644
index 0000000..7634a9f
--- /dev/null
+++ b/xprintf.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Formatted output functions.
+ *
+ * The functions provided by this module implement a subset of the C99
+ * printf() like functions, mostly centered around character, string, and
+ * integer conversions.
+ *
+ * The supported specifiers are: d i o u x X c s p n %
+ * The supported length modifiers are: hh h l ll z t
+ *
+ * The xprintf() and xvprintf() functions internally use a statically
+ * allocated buffer protected by a mutex. They are thread-safe, but cannot
+ * produce more than 1 KiB of output per call.
+ */
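+
+/*
+ * A minimal usage sketch (illustrative only):
+ *
+ *     char buf[64];
+ *     int length;
+ *
+ *     length = xsnprintf(buf, sizeof(buf), "%s: %#x\n", "flags", 42);
+ *
+ *     if ((size_t)length >= sizeof(buf))
+ *         xprintf("output truncated to %zu characters\n", sizeof(buf) - 1);
+ *
+ * As with the C99 functions, the returned length is that of the complete
+ * conversion, so it can be compared against the buffer size to detect
+ * truncation.
+ */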
+
+#ifndef _XPRINTF_H
+#define _XPRINTF_H
+
+#include <stdarg.h>
+
+#include "macros.h"
+
+int xprintf(const char *format, ...) __format_printf(1, 2);
+int xvprintf(const char *format, va_list ap) __format_printf(1, 0);
+
+int xsprintf(char *str, const char *format, ...) __format_printf(2, 3);
+int xvsprintf(char *str, const char *format, va_list ap) __format_printf(2, 0);
+
+int xsnprintf(char *str, size_t size, const char *format, ...)
+ __format_printf(3, 4);
+int xvsnprintf(char *str, size_t size, const char *format, va_list ap)
+ __format_printf(3, 0);
+
+#endif /* _XPRINTF_H */