From 7bc54a622e0c57a1085cd2990a1deedc8bd4743d Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Tue, 13 Dec 2011 20:27:56 +0000 Subject: Import the slab allocator As it is intended to completely replace the zone allocator, remove it on the way. So long to the venerable code! * Makefrag.am (libkernel_a_SOURCES): Add kern/slab.{c,h}, remove kern/kalloc.c and kern/zalloc.{c,h}. * configfrag.ac (SLAB_VERIFY, SLAB_USE_CPU_POOLS): Add defines. * i386/Makefrag.am (libkernel_a_SOURCES): Remove i386/i386/zalloc.h. * i386/configfrag.ac (CPU_L1_SHIFT): Remove define. * include/mach_debug/slab_info.h: New file. * kern/slab.c: Likewise. * kern/slab.h: Likewise. * i386/i386/zalloc.h: Remove file. * include/mach_debug/zone_info.h: Likewise. * kern/kalloc.c: Likewise. * kern/zalloc.c: Likewise. * kern/zalloc.h: Likewise. --- kern/slab.c | 1576 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1576 insertions(+) create mode 100644 kern/slab.c (limited to 'kern/slab.c') diff --git a/kern/slab.c b/kern/slab.c new file mode 100644 index 00000000..38413e83 --- /dev/null +++ b/kern/slab.c @@ -0,0 +1,1576 @@ +/* + * Copyright (c) 2009, 2010, 2011 Richard Braun. + * Copyright (c) 2011 Maksym Planeta. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* + * Object caching and general purpose memory allocator. + * + * This allocator is based on the paper "The Slab Allocator: An Object-Caching + * Kernel Memory Allocator" by Jeff Bonwick. + * + * It allows the allocation of objects (i.e. fixed-size typed buffers) from + * caches and is efficient in both space and time. This implementation follows + * many of the recommendations from the paper mentioned above. The most notable + * differences are outlined below. + * + * The per-cache self-scaling hash table for buffer-to-bufctl conversion, + * described in 3.2.3 "Slab Layout for Large Objects", has been replaced by + * a red-black tree storing slabs, sorted by address. The use of a + * self-balancing tree for buffer-to-slab conversions provides a few advantages + * over a hash table. Unlike a hash table, a BST provides a "lookup nearest" + * operation, so obtaining the slab data (whether it is embedded in the slab or + * off slab) from a buffer address simply consists of a "lookup nearest towards + * 0" tree search. Storing slabs instead of buffers also considerably reduces + * the number of elements to retain. Finally, a self-balancing tree is a true + * self-scaling data structure, whereas a hash table requires periodic + * maintenance and complete resizing, which is expensive. The only drawback is + * that releasing a buffer to the slab layer takes logarithmic time instead of + * constant time.
But as the data set size is kept reasonable (because slabs + * are stored instead of buffers) and because the CPU pool layer services most + * requests, avoiding many accesses to the slab layer, it is considered an + * acceptable tradeoff. + * + * This implementation uses per-cpu pools of objects, which service most + * allocation requests. These pools act as caches (but are named differently + * to avoid confusion with CPU caches) that reduce contention on multiprocessor + * systems. When a pool is empty and cannot provide an object, it is filled by + * transferring multiple objects from the slab layer. The symmetric case is + * handled likewise. + */ + +#include <string.h> +#include <kern/assert.h> +#include <kern/mach_clock.h> +#include <kern/printf.h> +#include <kern/slab.h> +#include <kern/kalloc.h> +#include <kern/cpu_number.h> +#include <mach/vm_param.h> +#include <mach/machine/vm_types.h> +#include <vm/vm_kern.h> +#include <vm/vm_types.h> +#include <sys/types.h> + +#ifdef MACH_DEBUG +#include <mach_debug/slab_info.h> +#endif + +/* + * Utility macros. + */ +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0) +#define ISP2(x) P2ALIGNED(x, x) +#define P2ALIGN(x, a) ((x) & -(a)) +#define P2ROUND(x, a) (-(-(x) & -(a))) +#define P2END(x, a) (-(~(x) & -(a))) +#define likely(expr) __builtin_expect(!!(expr), 1) +#define unlikely(expr) __builtin_expect(!!(expr), 0) + +/* + * Minimum required alignment. + */ +#define KMEM_ALIGN_MIN 8 + +/* + * Minimum number of buffers per slab. + * + * This value is ignored when the slab size exceeds a threshold. + */ +#define KMEM_MIN_BUFS_PER_SLAB 8 + +/* + * Special slab size beyond which the minimum number of buffers per slab is + * ignored when computing the slab size of a cache. + */ +#define KMEM_SLAB_SIZE_THRESHOLD (8 * PAGE_SIZE) + +/* + * Special buffer size under which slab data is unconditionally allocated + * from its associated slab. + */ +#define KMEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8) + +/* + * Time (in clock ticks) between two garbage collection operations. + */ +#define KMEM_GC_INTERVAL (1 * hz) + +/* + * The transfer size of a CPU pool is computed by dividing the pool size by + * this value. + */ +#define KMEM_CPU_POOL_TRANSFER_RATIO 2 + +/* + * Redzone guard word. + */ +#ifdef __LP64__ +#if _HOST_BIG_ENDIAN +#define KMEM_REDZONE_WORD 0xfeedfacefeedfaceUL +#else /* _HOST_BIG_ENDIAN */ +#define KMEM_REDZONE_WORD 0xcefaedfecefaedfeUL +#endif /* _HOST_BIG_ENDIAN */ +#else /* __LP64__ */ +#if _HOST_BIG_ENDIAN +#define KMEM_REDZONE_WORD 0xfeedfaceUL +#else /* _HOST_BIG_ENDIAN */ +#define KMEM_REDZONE_WORD 0xcefaedfeUL +#endif /* _HOST_BIG_ENDIAN */ +#endif /* __LP64__ */ + +/* + * Redzone byte for padding. + */ +#define KMEM_REDZONE_BYTE 0xbb + +/* + * Size of the VM submap from which default backend functions allocate. + */ +#define KMEM_MAP_SIZE (64 * 1024 * 1024) + +/* + * Shift for the first kalloc cache size. + */ +#define KALLOC_FIRST_SHIFT 5 + +/* + * Number of caches backing general purpose allocations. + */ +#define KALLOC_NR_CACHES 13 + +/* + * Size of the VM submap for general purpose allocations. + */ +#define KALLOC_MAP_SIZE (64 * 1024 * 1024) + +/* + * Values the buftag state member can take.
+ */ +#ifdef __LP64__ +#if _HOST_BIG_ENDIAN +#define KMEM_BUFTAG_ALLOC 0xa110c8eda110c8edUL +#define KMEM_BUFTAG_FREE 0xf4eeb10cf4eeb10cUL +#else /* _HOST_BIG_ENDIAN */ +#define KMEM_BUFTAG_ALLOC 0xedc810a1edc810a1UL +#define KMEM_BUFTAG_FREE 0x0cb1eef40cb1eef4UL +#endif /* _HOST_BIG_ENDIAN */ +#else /* __LP64__ */ +#if _HOST_BIG_ENDIAN +#define KMEM_BUFTAG_ALLOC 0xa110c8edUL +#define KMEM_BUFTAG_FREE 0xf4eeb10cUL +#else /* _HOST_BIG_ENDIAN */ +#define KMEM_BUFTAG_ALLOC 0xedc810a1UL +#define KMEM_BUFTAG_FREE 0x0cb1eef4UL +#endif /* _HOST_BIG_ENDIAN */ +#endif /* __LP64__ */ + +/* + * Free and uninitialized patterns. + * + * These values are unconditionally 64-bit wide since buffers are at least + * 8-byte aligned. + */ +#if _HOST_BIG_ENDIAN +#define KMEM_FREE_PATTERN 0xdeadbeefdeadbeefULL +#define KMEM_UNINIT_PATTERN 0xbaddcafebaddcafeULL +#else /* _HOST_BIG_ENDIAN */ +#define KMEM_FREE_PATTERN 0xefbeaddeefbeaddeULL +#define KMEM_UNINIT_PATTERN 0xfecaddbafecaddbaULL +#endif /* _HOST_BIG_ENDIAN */ + +/* + * Cache flags. + * + * The flags don't change once set and can be tested without locking. + */ +#define KMEM_CF_NO_CPU_POOL 0x01 /* CPU pool layer disabled */ +#define KMEM_CF_SLAB_EXTERNAL 0x02 /* Slab data is off slab */ +#define KMEM_CF_NO_RECLAIM 0x04 /* Slabs are not reclaimable */ +#define KMEM_CF_VERIFY 0x08 /* Debugging facilities enabled */ +#define KMEM_CF_DIRECT 0x10 /* No buf-to-slab tree lookup */ + +/* + * Options for kmem_cache_alloc_verify(). + */ +#define KMEM_AV_NOCONSTRUCT 0 +#define KMEM_AV_CONSTRUCT 1 + +/* + * Error codes for kmem_cache_error(). + */ +#define KMEM_ERR_INVALID 0 /* Invalid address being freed */ +#define KMEM_ERR_DOUBLEFREE 1 /* Freeing already free address */ +#define KMEM_ERR_BUFTAG 2 /* Invalid buftag content */ +#define KMEM_ERR_MODIFIED 3 /* Buffer modified while free */ +#define KMEM_ERR_REDZONE 4 /* Redzone violation */ + +#if SLAB_USE_CPU_POOLS +/* + * Available CPU pool types. + * + * For each entry, the CPU pool size applies from the entry buf_size + * (excluded) up to (and including) the buf_size of the preceding entry. + * + * See struct kmem_cpu_pool_type for a description of the values. + */ +static struct kmem_cpu_pool_type kmem_cpu_pool_types[] = { + { 32768, 1, 0, NULL }, + { 4096, 8, CPU_L1_SIZE, NULL }, + { 256, 64, CPU_L1_SIZE, NULL }, + { 0, 128, CPU_L1_SIZE, NULL } +}; + +/* + * Caches where CPU pool arrays are allocated from. + */ +static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)]; +#endif /* SLAB_USE_CPU_POOLS */ + +/* + * Cache for off slab data. + */ +static struct kmem_cache kmem_slab_cache; + +/* + * General purpose caches array. + */ +static struct kmem_cache kalloc_caches[KALLOC_NR_CACHES]; + +/* + * List of all caches managed by the allocator. + */ +static struct list kmem_cache_list; +static unsigned int kmem_nr_caches; +static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock; + +/* + * VM submap for slab caches (except general purpose allocations). + */ +static struct vm_map kmem_map_store; +vm_map_t kmem_map = &kmem_map_store; + +/* + * VM submap for general purpose allocations. + */ +static struct vm_map kalloc_map_store; +vm_map_t kalloc_map = &kalloc_map_store; + +/* + * Time of the last memory reclaim, in clock ticks. + */ +static unsigned int kmem_gc_last_tick; + +#define kmem_error(format, ...) \ + printf("mem: error: %s(): " format "\n", __func__, \ + ## __VA_ARGS__) + +#define kmem_warn(format, ...)
\ + printf("mem: warning: %s(): " format "\n", __func__, \ + ## __VA_ARGS__) + +#define kmem_print(format, ...) \ + printf(format "\n", ## __VA_ARGS__) + +static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error, + void *arg); +static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache); +static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf); + +static void * kmem_buf_verify_bytes(void *buf, void *pattern, size_t size) +{ + char *ptr, *pattern_ptr, *end; + + end = buf + size; + + for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++) + if (*ptr != *pattern_ptr) + return ptr; + + return NULL; +} + +static void * kmem_buf_verify(void *buf, uint64_t pattern, vm_size_t size) +{ + uint64_t *ptr, *end; + + assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t))); + assert(P2ALIGNED(size, sizeof(uint64_t))); + + end = buf + size; + + for (ptr = buf; ptr < end; ptr++) + if (*ptr != pattern) + return kmem_buf_verify_bytes(ptr, &pattern, sizeof(pattern)); + + return NULL; +} + +static void kmem_buf_fill(void *buf, uint64_t pattern, size_t size) +{ + uint64_t *ptr, *end; + + assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t))); + assert(P2ALIGNED(size, sizeof(uint64_t))); + + end = buf + size; + + for (ptr = buf; ptr < end; ptr++) + *ptr = pattern; +} + +static void * kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new, + size_t size) +{ + uint64_t *ptr, *end; + + assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t))); + assert(P2ALIGNED(size, sizeof(uint64_t))); + + end = buf + size; + + for (ptr = buf; ptr < end; ptr++) { + if (*ptr != old) + return kmem_buf_verify_bytes(ptr, &old, sizeof(old)); + + *ptr = new; + } + + return NULL; +} + +static inline union kmem_bufctl * +kmem_buf_to_bufctl(void *buf, struct kmem_cache *cache) +{ + return (union kmem_bufctl *)(buf + cache->bufctl_dist); +} + +static inline struct kmem_buftag * +kmem_buf_to_buftag(void *buf, struct kmem_cache *cache) +{ + return (struct kmem_buftag *)(buf + cache->buftag_dist); +} + +static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl, + struct kmem_cache *cache) +{ + return (void *)bufctl - cache->bufctl_dist; +} + +static vm_offset_t kmem_pagealloc(vm_size_t size) +{ + vm_offset_t addr; + kern_return_t kr; + + kr = kmem_alloc_wired(kmem_map, &addr, size); + + if (kr != KERN_SUCCESS) + return 0; + + return addr; +} + +static void kmem_pagefree(vm_offset_t ptr, vm_size_t size) +{ + kmem_free(kmem_map, ptr, size); +} + +static void kmem_slab_create_verify(struct kmem_slab *slab, + struct kmem_cache *cache) +{ + struct kmem_buftag *buftag; + size_t buf_size; + unsigned long buffers; + void *buf; + + buf_size = cache->buf_size; + buf = slab->addr; + buftag = kmem_buf_to_buftag(buf, cache); + + for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) { + kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist); + buftag->state = KMEM_BUFTAG_FREE; + buf += buf_size; + buftag = kmem_buf_to_buftag(buf, cache); + } +} + +/* + * Create an empty slab for a cache. + * + * The caller must drop all locks before calling this function. 
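+ * + * For illustration, assuming a 4 KiB page, a zero color and a 32-byte + * struct kmem_slab: an embedded slab for 128-byte buffers holds 31 buffers + * from the slab base upwards, the struct kmem_slab header sits at the very + * end of the page, and each free buffer carries the union kmem_bufctl that + * threads it onto the first_free list built below.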
+ */ +static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache, + size_t color) +{ + struct kmem_slab *slab; + union kmem_bufctl *bufctl; + size_t buf_size; + unsigned long buffers; + void *slab_buf; + + if (cache->slab_alloc_fn == NULL) + slab_buf = (void *)kmem_pagealloc(cache->slab_size); + else + slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size); + + if (slab_buf == NULL) + return NULL; + + if (cache->flags & KMEM_CF_SLAB_EXTERNAL) { + assert(!(cache->flags & KMEM_CF_NO_RECLAIM)); + slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache); + + if (slab == NULL) { + if (cache->slab_free_fn == NULL) + kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size); + else + cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size); + + return NULL; + } + } else { + slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1; + } + + list_node_init(&slab->list_node); + rbtree_node_init(&slab->tree_node); + slab->nr_refs = 0; + slab->first_free = NULL; + slab->addr = slab_buf + color; + + buf_size = cache->buf_size; + bufctl = kmem_buf_to_bufctl(slab->addr, cache); + + for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) { + bufctl->next = slab->first_free; + slab->first_free = bufctl; + bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size); + } + + if (cache->flags & KMEM_CF_VERIFY) + kmem_slab_create_verify(slab, cache); + + return slab; +} + +static void kmem_slab_destroy_verify(struct kmem_slab *slab, + struct kmem_cache *cache) +{ + struct kmem_buftag *buftag; + size_t buf_size; + unsigned long buffers; + void *buf, *addr; + + buf_size = cache->buf_size; + buf = slab->addr; + buftag = kmem_buf_to_buftag(buf, cache); + + for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) { + if (buftag->state != KMEM_BUFTAG_FREE) + kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag); + + addr = kmem_buf_verify(buf, KMEM_FREE_PATTERN, cache->bufctl_dist); + + if (addr != NULL) + kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr); + + buf += buf_size; + buftag = kmem_buf_to_buftag(buf, cache); + } +} + +/* + * Destroy a slab. + * + * The caller must drop all locks before calling this function. 
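+ * + * (Presumably because destroying an off-slab kmem_slab frees it back to + * kmem_slab_cache, which takes that cache's own locks, and because + * releasing the backing pages reenters the VM system.)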
+ */ +static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache) +{ + vm_offset_t slab_buf; + + assert(slab->nr_refs == 0); + assert(slab->first_free != NULL); + assert(!(cache->flags & KMEM_CF_NO_RECLAIM)); + + if (cache->flags & KMEM_CF_VERIFY) + kmem_slab_destroy_verify(slab, cache); + + slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE); + + if (cache->slab_free_fn == NULL) + kmem_pagefree(slab_buf, cache->slab_size); + else + cache->slab_free_fn(slab_buf, cache->slab_size); + + if (cache->flags & KMEM_CF_SLAB_EXTERNAL) + kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab); +} + +static inline int kmem_slab_use_tree(int flags) +{ + return !(flags & KMEM_CF_DIRECT) || (flags & KMEM_CF_VERIFY); +} + +static inline int kmem_slab_cmp_lookup(const void *addr, + const struct rbtree_node *node) +{ + struct kmem_slab *slab; + + slab = rbtree_entry(node, struct kmem_slab, tree_node); + + if (addr == slab->addr) + return 0; + else if (addr < slab->addr) + return -1; + else + return 1; +} + +static inline int kmem_slab_cmp_insert(const struct rbtree_node *a, + const struct rbtree_node *b) +{ + struct kmem_slab *slab; + + slab = rbtree_entry(a, struct kmem_slab, tree_node); + return kmem_slab_cmp_lookup(slab->addr, b); +} + +#if SLAB_USE_CPU_POOLS +static void kmem_cpu_pool_init(struct kmem_cpu_pool *cpu_pool, + struct kmem_cache *cache) +{ + simple_lock_init(&cpu_pool->lock); + cpu_pool->flags = cache->flags; + cpu_pool->size = 0; + cpu_pool->transfer_size = 0; + cpu_pool->nr_objs = 0; + cpu_pool->array = NULL; +} + +/* + * Return a CPU pool. + * + * This function will generally return the pool matching the CPU running the + * calling thread. Because of context switches and thread migration, the + * caller might be running on another processor after this function returns. + * Although not optimal, this should rarely happen, and it doesn't affect the + * allocator operations in any other way, as CPU pools are always valid, and + * their access is serialized by a lock. 
+ */ +static inline struct kmem_cpu_pool * kmem_cpu_pool_get(struct kmem_cache *cache) +{ + return &cache->cpu_pools[cpu_number()]; +} + +static inline void kmem_cpu_pool_build(struct kmem_cpu_pool *cpu_pool, + struct kmem_cache *cache, void **array) +{ + cpu_pool->size = cache->cpu_pool_type->array_size; + cpu_pool->transfer_size = (cpu_pool->size + + KMEM_CPU_POOL_TRANSFER_RATIO - 1) + / KMEM_CPU_POOL_TRANSFER_RATIO; + cpu_pool->array = array; +} + +static inline void * kmem_cpu_pool_pop(struct kmem_cpu_pool *cpu_pool) +{ + cpu_pool->nr_objs--; + return cpu_pool->array[cpu_pool->nr_objs]; +} + +static inline void kmem_cpu_pool_push(struct kmem_cpu_pool *cpu_pool, void *obj) +{ + cpu_pool->array[cpu_pool->nr_objs] = obj; + cpu_pool->nr_objs++; +} + +static int kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool, + struct kmem_cache *cache) +{ + void *obj; + int i; + + simple_lock(&cache->lock); + + for (i = 0; i < cpu_pool->transfer_size; i++) { + obj = kmem_cache_alloc_from_slab(cache); + + if (obj == NULL) + break; + + kmem_cpu_pool_push(cpu_pool, obj); + } + + simple_unlock(&cache->lock); + + return i; +} + +static void kmem_cpu_pool_drain(struct kmem_cpu_pool *cpu_pool, + struct kmem_cache *cache) +{ + void *obj; + int i; + + simple_lock(&cache->lock); + + for (i = cpu_pool->transfer_size; i > 0; i--) { + obj = kmem_cpu_pool_pop(cpu_pool); + kmem_cache_free_to_slab(cache, obj); + } + + simple_unlock(&cache->lock); +} +#endif /* SLAB_USE_CPU_POOLS */ + +static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error, + void *arg) +{ + struct kmem_buftag *buftag; + + kmem_error("cache: %s, buffer: %p", cache->name, (void *)buf); + + switch(error) { + case KMEM_ERR_INVALID: + kmem_error("freeing invalid address"); + break; + case KMEM_ERR_DOUBLEFREE: + kmem_error("attempting to free the same address twice"); + break; + case KMEM_ERR_BUFTAG: + buftag = arg; + kmem_error("invalid buftag content, buftag state: %p", + (void *)buftag->state); + break; + case KMEM_ERR_MODIFIED: + kmem_error("free buffer modified, fault address: %p, " + "offset in buffer: %td", arg, arg - buf); + break; + case KMEM_ERR_REDZONE: + kmem_error("write beyond end of buffer, fault address: %p, " + "offset in buffer: %td", arg, arg - buf); + break; + default: + kmem_error("unknown error"); + } + + /* + * Never reached. + */ +} + +/* + * Compute an appropriate slab size for the given cache. + * + * Once the slab size is known, this function sets the related properties + * (buffers per slab and maximum color). It can also set the KMEM_CF_DIRECT + * and/or KMEM_CF_SLAB_EXTERNAL flags depending on the resulting layout. 
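+ * + * As a worked example, assuming a 4 KiB page and a 32-byte struct + * kmem_slab: a cache of 128-byte buffers falls under + * KMEM_BUF_SIZE_THRESHOLD, so its slab data is embedded, and the + * computation settles on a single-page slab holding 31 buffers with 96 + * bytes left for coloring, flagged KMEM_CF_DIRECT since the slab size + * equals PAGE_SIZE.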
+ */ +static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags) +{ + size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size; + size_t waste, waste_min; + int embed, optimal_embed = optimal_embed; + + buf_size = cache->buf_size; + + if (buf_size < KMEM_BUF_SIZE_THRESHOLD) + flags |= KMEM_CACHE_NOOFFSLAB; + + i = 0; + waste_min = (size_t)-1; + + do { + i++; + slab_size = P2ROUND(i * buf_size, PAGE_SIZE); + free_slab_size = slab_size; + + if (flags & KMEM_CACHE_NOOFFSLAB) + free_slab_size -= sizeof(struct kmem_slab); + + buffers = free_slab_size / buf_size; + waste = free_slab_size % buf_size; + + if (buffers > i) + i = buffers; + + if (flags & KMEM_CACHE_NOOFFSLAB) + embed = 1; + else if (sizeof(struct kmem_slab) <= waste) { + embed = 1; + waste -= sizeof(struct kmem_slab); + } else { + embed = 0; + } + + if (waste <= waste_min) { + waste_min = waste; + optimal_size = slab_size; + optimal_embed = embed; + } + } while ((buffers < KMEM_MIN_BUFS_PER_SLAB) + && (slab_size < KMEM_SLAB_SIZE_THRESHOLD)); + + assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed); + + cache->slab_size = optimal_size; + slab_size = cache->slab_size - (optimal_embed + ? sizeof(struct kmem_slab) + : 0); + cache->bufs_per_slab = slab_size / buf_size; + cache->color_max = slab_size % buf_size; + + if (cache->color_max >= PAGE_SIZE) + cache->color_max = PAGE_SIZE - 1; + + if (optimal_embed) { + if (cache->slab_size == PAGE_SIZE) + cache->flags |= KMEM_CF_DIRECT; + } else { + cache->flags |= KMEM_CF_SLAB_EXTERNAL; + } +} + +void kmem_cache_init(struct kmem_cache *cache, const char *name, + size_t obj_size, size_t align, kmem_cache_ctor_t ctor, + kmem_slab_alloc_fn_t slab_alloc_fn, + kmem_slab_free_fn_t slab_free_fn, int flags) +{ +#if SLAB_USE_CPU_POOLS + struct kmem_cpu_pool_type *cpu_pool_type; + size_t i; +#endif /* SLAB_USE_CPU_POOLS */ + size_t buf_size; + +#if SLAB_VERIFY + cache->flags = KMEM_CF_VERIFY; +#else /* SLAB_VERIFY */ + cache->flags = 0; +#endif /* SLAB_VERIFY */ + + if (flags & KMEM_CACHE_NOCPUPOOL) + cache->flags |= KMEM_CF_NO_CPU_POOL; + + if (flags & KMEM_CACHE_NORECLAIM) { + assert(slab_free_fn == NULL); + flags |= KMEM_CACHE_NOOFFSLAB; + cache->flags |= KMEM_CF_NO_RECLAIM; + } + + if (flags & KMEM_CACHE_VERIFY) + cache->flags |= KMEM_CF_VERIFY; + + if (align < KMEM_ALIGN_MIN) + align = KMEM_ALIGN_MIN; + + assert(obj_size > 0); + assert(ISP2(align)); + assert(align < PAGE_SIZE); + + buf_size = P2ROUND(obj_size, align); + + simple_lock_init(&cache->lock); + list_node_init(&cache->node); + list_init(&cache->partial_slabs); + list_init(&cache->free_slabs); + rbtree_init(&cache->active_slabs); + cache->obj_size = obj_size; + cache->align = align; + cache->buf_size = buf_size; + cache->bufctl_dist = buf_size - sizeof(union kmem_bufctl); + cache->color = 0; + cache->nr_objs = 0; + cache->nr_bufs = 0; + cache->nr_slabs = 0; + cache->nr_free_slabs = 0; + cache->ctor = ctor; + cache->slab_alloc_fn = slab_alloc_fn; + cache->slab_free_fn = slab_free_fn; + strncpy(cache->name, name, sizeof(cache->name)); + cache->name[sizeof(cache->name) - 1] = '\0'; + cache->buftag_dist = 0; + cache->redzone_pad = 0; + + if (cache->flags & KMEM_CF_VERIFY) { + cache->bufctl_dist = buf_size; + cache->buftag_dist = cache->bufctl_dist + sizeof(union kmem_bufctl); + cache->redzone_pad = cache->bufctl_dist - cache->obj_size; + buf_size += sizeof(union kmem_bufctl) + sizeof(struct kmem_buftag); + buf_size = P2ROUND(buf_size, align); + cache->buf_size = buf_size; + } + + kmem_cache_compute_sizes(cache, 
flags); + +#if SLAB_USE_CPU_POOLS + for (cpu_pool_type = kmem_cpu_pool_types; + buf_size <= cpu_pool_type->buf_size; + cpu_pool_type++); + + cache->cpu_pool_type = cpu_pool_type; + + for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) + kmem_cpu_pool_init(&cache->cpu_pools[i], cache); +#endif /* SLAB_USE_CPU_POOLS */ + + simple_lock(&kmem_cache_list_lock); + list_insert_tail(&kmem_cache_list, &cache->node); + kmem_nr_caches++; + simple_unlock(&kmem_cache_list_lock); +} + +static inline int kmem_cache_empty(struct kmem_cache *cache) +{ + return cache->nr_objs == cache->nr_bufs; +} + +static int kmem_cache_grow(struct kmem_cache *cache) +{ + struct kmem_slab *slab; + size_t color; + int empty; + + simple_lock(&cache->lock); + + if (!kmem_cache_empty(cache)) { + simple_unlock(&cache->lock); + return 1; + } + + color = cache->color; + cache->color += cache->align; + + if (cache->color > cache->color_max) + cache->color = 0; + + simple_unlock(&cache->lock); + + slab = kmem_slab_create(cache, color); + + simple_lock(&cache->lock); + + if (slab != NULL) { + list_insert_tail(&cache->free_slabs, &slab->list_node); + cache->nr_bufs += cache->bufs_per_slab; + cache->nr_slabs++; + cache->nr_free_slabs++; + } + + /* + * Even if our slab creation failed, another thread might have succeeded + * in growing the cache. + */ + empty = kmem_cache_empty(cache); + + simple_unlock(&cache->lock); + + return !empty; +} + +static void kmem_cache_reap(struct kmem_cache *cache) +{ + struct kmem_slab *slab; + struct list dead_slabs; + + if (cache->flags & KMEM_CF_NO_RECLAIM) + return; + + list_init(&dead_slabs); + + simple_lock(&cache->lock); + + while (!list_empty(&cache->free_slabs)) { + slab = list_first_entry(&cache->free_slabs, struct kmem_slab, + list_node); + list_remove(&slab->list_node); + list_insert(&dead_slabs, &slab->list_node); + cache->nr_bufs -= cache->bufs_per_slab; + cache->nr_slabs--; + cache->nr_free_slabs--; + } + + simple_unlock(&cache->lock); + + while (!list_empty(&dead_slabs)) { + slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node); + list_remove(&slab->list_node); + kmem_slab_destroy(slab, cache); + } +} + +/* + * Allocate a raw (unconstructed) buffer from the slab layer of a cache. + * + * The cache must be locked before calling this function. + */ +static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache) +{ + struct kmem_slab *slab; + union kmem_bufctl *bufctl; + + if (!list_empty(&cache->partial_slabs)) + slab = list_first_entry(&cache->partial_slabs, struct kmem_slab, + list_node); + else if (!list_empty(&cache->free_slabs)) + slab = list_first_entry(&cache->free_slabs, struct kmem_slab, + list_node); + else + return NULL; + + bufctl = slab->first_free; + assert(bufctl != NULL); + slab->first_free = bufctl->next; + slab->nr_refs++; + cache->nr_objs++; + + /* + * The slab has become complete. + */ + if (slab->nr_refs == cache->bufs_per_slab) { + list_remove(&slab->list_node); + + if (slab->nr_refs == 1) + cache->nr_free_slabs--; + } else if (slab->nr_refs == 1) { + /* + * The slab has become partial. + */ + list_remove(&slab->list_node); + list_insert_tail(&cache->partial_slabs, &slab->list_node); + cache->nr_free_slabs--; + } else if (!list_singular(&cache->partial_slabs)) { + struct list *node; + struct kmem_slab *tmp; + + /* + * The slab remains partial. If there is more than one partial slab, + * keep the list sorted.
+ */ + + assert(slab->nr_refs > 1); + + for (node = list_prev(&slab->list_node); + !list_end(&cache->partial_slabs, node); + node = list_prev(node)) { + tmp = list_entry(node, struct kmem_slab, list_node); + + if (tmp->nr_refs >= slab->nr_refs) + break; + } + + /* + * If the direct neighbor was found, the list is already sorted. + * If no slab was found, the slab is inserted at the head of the list. + */ + if (node != list_prev(&slab->list_node)) { + list_remove(&slab->list_node); + list_insert_after(node, &slab->list_node); + } + } + + if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags)) + rbtree_insert(&cache->active_slabs, &slab->tree_node, + kmem_slab_cmp_insert); + + return kmem_bufctl_to_buf(bufctl, cache); +} + +/* + * Release a buffer to the slab layer of a cache. + * + * The cache must be locked before calling this function. + */ +static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf) +{ + struct kmem_slab *slab; + union kmem_bufctl *bufctl; + + if (cache->flags & KMEM_CF_DIRECT) { + assert(cache->slab_size == PAGE_SIZE); + slab = (struct kmem_slab *)P2END((unsigned long)buf, cache->slab_size) + - 1; + } else { + struct rbtree_node *node; + + node = rbtree_lookup_nearest(&cache->active_slabs, buf, + kmem_slab_cmp_lookup, RBTREE_LEFT); + assert(node != NULL); + slab = rbtree_entry(node, struct kmem_slab, tree_node); + assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr + + cache->slab_size, PAGE_SIZE))); + } + + assert(slab->nr_refs >= 1); + assert(slab->nr_refs <= cache->bufs_per_slab); + bufctl = kmem_buf_to_bufctl(buf, cache); + bufctl->next = slab->first_free; + slab->first_free = bufctl; + slab->nr_refs--; + cache->nr_objs--; + + /* + * The slab has become free. + */ + if (slab->nr_refs == 0) { + if (kmem_slab_use_tree(cache->flags)) + rbtree_remove(&cache->active_slabs, &slab->tree_node); + + /* + * The slab was partial. + */ + if (cache->bufs_per_slab > 1) + list_remove(&slab->list_node); + + list_insert_tail(&cache->free_slabs, &slab->list_node); + cache->nr_free_slabs++; + } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) { + /* + * The slab has become partial. + */ + list_insert(&cache->partial_slabs, &slab->list_node); + } else if (!list_singular(&cache->partial_slabs)) { + struct list *node; + struct kmem_slab *tmp; + + /* + * The slab remains partial. If there is more than one partial slab, + * keep the list sorted. + */ + + assert(slab->nr_refs > 0); + + for (node = list_next(&slab->list_node); + !list_end(&cache->partial_slabs, node); + node = list_next(node)) { + tmp = list_entry(node, struct kmem_slab, list_node); + + if (tmp->nr_refs <= slab->nr_refs) + break; + } + + /* + * If the direct neighbor was found, the list is already sorted. + * If no slab was found, the slab is inserted at the tail of the list.
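+ * + * (The partial list is kept sorted by descending reference count so that + * allocations are served from the most used slab first; the idea being to + * concentrate activity there and let lightly used slabs drain until they + * become free and reclaimable.)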
+ */ + if (node != list_next(&slab->list_node)) { + list_remove(&slab->list_node); + list_insert_before(node, &slab->list_node); + } + } +} + +static void kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, + int construct) +{ + struct kmem_buftag *buftag; + union kmem_bufctl *bufctl; + void *addr; + + buftag = kmem_buf_to_buftag(buf, cache); + + if (buftag->state != KMEM_BUFTAG_FREE) + kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag); + + addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN, + cache->bufctl_dist); + + if (addr != NULL) + kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr); + + addr = buf + cache->obj_size; + memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad); + + bufctl = kmem_buf_to_bufctl(buf, cache); + bufctl->redzone = KMEM_REDZONE_WORD; + buftag->state = KMEM_BUFTAG_ALLOC; + + if (construct && (cache->ctor != NULL)) + cache->ctor(buf); +} + +vm_offset_t kmem_cache_alloc(struct kmem_cache *cache) +{ + int filled; + void *buf; + +#if SLAB_USE_CPU_POOLS + struct kmem_cpu_pool *cpu_pool; + + cpu_pool = kmem_cpu_pool_get(cache); + + if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL) + goto slab_alloc; + + simple_lock(&cpu_pool->lock); + +fast_alloc: + if (likely(cpu_pool->nr_objs > 0)) { + buf = kmem_cpu_pool_pop(cpu_pool); + simple_unlock(&cpu_pool->lock); + + if (cpu_pool->flags & KMEM_CF_VERIFY) + kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT); + + return (vm_offset_t)buf; + } + + if (cpu_pool->array != NULL) { + filled = kmem_cpu_pool_fill(cpu_pool, cache); + + if (!filled) { + simple_unlock(&cpu_pool->lock); + + filled = kmem_cache_grow(cache); + + if (!filled) + return 0; + + simple_lock(&cpu_pool->lock); + } + + goto fast_alloc; + } + + simple_unlock(&cpu_pool->lock); +#endif /* SLAB_USE_CPU_POOLS */ + +slab_alloc: + simple_lock(&cache->lock); + buf = kmem_cache_alloc_from_slab(cache); + simple_unlock(&cache->lock); + + if (buf == NULL) { + filled = kmem_cache_grow(cache); + + if (!filled) + return 0; + + goto slab_alloc; + } + + if (cache->flags & KMEM_CF_VERIFY) + kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT); + + if (cache->ctor != NULL) + cache->ctor(buf); + + return (vm_offset_t)buf; +} + +static void kmem_cache_free_verify(struct kmem_cache *cache, void *buf) +{ + struct rbtree_node *node; + struct kmem_buftag *buftag; + struct kmem_slab *slab; + union kmem_bufctl *bufctl; + unsigned char *redzone_byte; + unsigned long slabend; + + simple_lock(&cache->lock); + node = rbtree_lookup_nearest(&cache->active_slabs, buf, + kmem_slab_cmp_lookup, RBTREE_LEFT); + simple_unlock(&cache->lock); + + if (node == NULL) + kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + + slab = rbtree_entry(node, struct kmem_slab, tree_node); + slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE); + + if ((unsigned long)buf >= slabend) + kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + + if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size) + != 0) + kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + + /* + * As the buffer address is valid, accessing its buftag is safe. 
+ */ + buftag = kmem_buf_to_buftag(buf, cache); + + if (buftag->state != KMEM_BUFTAG_ALLOC) { + if (buftag->state == KMEM_BUFTAG_FREE) + kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE, NULL); + else + kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag); + } + + redzone_byte = buf + cache->obj_size; + bufctl = kmem_buf_to_bufctl(buf, cache); + + while (redzone_byte < (unsigned char *)bufctl) { + if (*redzone_byte != KMEM_REDZONE_BYTE) + kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte); + + redzone_byte++; + } + + if (bufctl->redzone != KMEM_REDZONE_WORD) { + unsigned long word; + + word = KMEM_REDZONE_WORD; + redzone_byte = kmem_buf_verify_bytes(&bufctl->redzone, &word, + sizeof(bufctl->redzone)); + kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte); + } + + kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist); + buftag->state = KMEM_BUFTAG_FREE; +} + +void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj) +{ +#if SLAB_USE_CPU_POOLS + struct kmem_cpu_pool *cpu_pool; + void **array; + + cpu_pool = kmem_cpu_pool_get(cache); + + if (cpu_pool->flags & KMEM_CF_VERIFY) { +#else /* SLAB_USE_CPU_POOLS */ + if (cache->flags & KMEM_CF_VERIFY) { +#endif /* SLAB_USE_CPU_POOLS */ + kmem_cache_free_verify(cache, (void *)obj); + } + +#if SLAB_USE_CPU_POOLS + if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL) + goto slab_free; + + simple_lock(&cpu_pool->lock); + +fast_free: + if (likely(cpu_pool->nr_objs < cpu_pool->size)) { + kmem_cpu_pool_push(cpu_pool, (void *)obj); + simple_unlock(&cpu_pool->lock); + return; + } + + if (cpu_pool->array != NULL) { + kmem_cpu_pool_drain(cpu_pool, cache); + goto fast_free; + } + + simple_unlock(&cpu_pool->lock); + + array = (void *)kmem_cache_alloc(cache->cpu_pool_type->array_cache); + + if (array != NULL) { + simple_lock(&cpu_pool->lock); + + /* + * Another thread may have built the CPU pool while the lock was + * dropped. 
+ */ + if (cpu_pool->array != NULL) { + simple_unlock(&cpu_pool->lock); + kmem_cache_free(cache->cpu_pool_type->array_cache, + (vm_offset_t)array); + goto fast_free; + } + + kmem_cpu_pool_build(cpu_pool, cache, array); + goto fast_free; + } + +slab_free: +#endif /* SLAB_USE_CPU_POOLS */ + + kmem_cache_free_to_slab(cache, (void *)obj); +} + +void slab_collect(void) +{ + struct kmem_cache *cache; + + if (sched_tick <= (kmem_gc_last_tick + KMEM_GC_INTERVAL)) + return; + + kmem_gc_last_tick = sched_tick; + + simple_lock(&kmem_cache_list_lock); + + list_for_each_entry(&kmem_cache_list, cache, node) + kmem_cache_reap(cache); + + simple_unlock(&kmem_cache_list_lock); +} + +void slab_bootstrap(void) +{ + /* Make sure a bufctl can always be stored in a buffer */ + assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN); + + list_init(&kmem_cache_list); + simple_lock_init(&kmem_cache_list_lock); +} + +void slab_init(void) +{ + vm_offset_t min, max; + +#if SLAB_USE_CPU_POOLS + struct kmem_cpu_pool_type *cpu_pool_type; + char name[KMEM_CACHE_NAME_SIZE]; + size_t i, size; +#endif /* SLAB_USE_CPU_POOLS */ + + kmem_submap(kmem_map, kernel_map, &min, &max, KMEM_MAP_SIZE, FALSE); + +#if SLAB_USE_CPU_POOLS + for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) { + cpu_pool_type = &kmem_cpu_pool_types[i]; + cpu_pool_type->array_cache = &kmem_cpu_array_caches[i]; + sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size); + size = sizeof(void *) * cpu_pool_type->array_size; + kmem_cache_init(cpu_pool_type->array_cache, name, size, + cpu_pool_type->array_align, NULL, NULL, NULL, 0); + } +#endif /* SLAB_USE_CPU_POOLS */ + + /* + * Prevent off slab data for the slab cache to avoid infinite recursion. + */ + kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab), + 0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB); +} + +static vm_offset_t kalloc_pagealloc(vm_size_t size) +{ + vm_offset_t addr; + kern_return_t kr; + + kr = kmem_alloc_wired(kalloc_map, &addr, size); + + if (kr != KERN_SUCCESS) + return 0; + + return addr; +} + +static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size) +{ + kmem_free(kalloc_map, ptr, size); +} + +void kalloc_init(void) +{ + char name[KMEM_CACHE_NAME_SIZE]; + size_t i, size; + vm_offset_t min, max; + + kmem_submap(kalloc_map, kernel_map, &min, &max, KALLOC_MAP_SIZE, FALSE); + + size = 1 << KALLOC_FIRST_SHIFT; + + for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) { + sprintf(name, "kalloc_%u", size); + kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, + kalloc_pagealloc, kalloc_pagefree, 0); + size <<= 1; + } +} + +/* + * Return the kalloc cache index matching the given allocation size, which + * must be strictly greater than 0.
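+ * + * For example, with KALLOC_FIRST_SHIFT being 5, sizes 1-32 map to index 0 + * (the 32-byte cache), 33-64 to index 1 and 65-128 to index 2; sizes past + * the last of the KALLOC_NR_CACHES caches make kalloc() fall back to page + * allocation.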
+ */ +static inline size_t kalloc_get_index(unsigned long size) +{ + assert(size != 0); + + size = (size - 1) >> KALLOC_FIRST_SHIFT; + + if (size == 0) + return 0; + else + return (sizeof(long) * 8) - __builtin_clzl(size); +} + +static void kalloc_verify(struct kmem_cache *cache, void *buf, size_t size) +{ + size_t redzone_size; + void *redzone; + + assert(size <= cache->obj_size); + + redzone = buf + size; + redzone_size = cache->obj_size - size; + memset(redzone, KMEM_REDZONE_BYTE, redzone_size); +} + +vm_offset_t kalloc(vm_size_t size) +{ + size_t index; + void *buf; + + if (size == 0) + return 0; + + index = kalloc_get_index(size); + + if (index < ARRAY_SIZE(kalloc_caches)) { + struct kmem_cache *cache; + + cache = &kalloc_caches[index]; + buf = (void *)kmem_cache_alloc(cache); + + if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY)) + kalloc_verify(cache, buf, size); + } else + buf = (void *)kalloc_pagealloc(size); + + return (vm_offset_t)buf; +} + +static void kfree_verify(struct kmem_cache *cache, void *buf, size_t size) +{ + unsigned char *redzone_byte, *redzone_end; + + assert(size <= cache->obj_size); + + redzone_byte = buf + size; + redzone_end = buf + cache->obj_size; + + while (redzone_byte < redzone_end) { + if (*redzone_byte != KMEM_REDZONE_BYTE) + kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte); + + redzone_byte++; + } +} + +void kfree(vm_offset_t data, vm_size_t size) +{ + size_t index; + + if ((data == 0) || (size == 0)) + return; + + index = kalloc_get_index(size); + + if (index < ARRAY_SIZE(kalloc_caches)) { + struct kmem_cache *cache; + + cache = &kalloc_caches[index]; + + if (cache->flags & KMEM_CF_VERIFY) + kfree_verify(cache, (void *)data, size); + + kmem_cache_free(cache, data); + } else { + kalloc_pagefree(data, size); + } +} + +#if MACH_DEBUG +kern_return_t host_slab_info(host_t host, cache_info_array_t *infop, + unsigned int *infoCntp) +{ + struct kmem_cache *cache; + cache_info_t *info; + unsigned int i, nr_caches; + vm_size_t info_size = info_size; + kern_return_t kr; + + if (host == HOST_NULL) + return KERN_INVALID_HOST; + + /* + * Assume the cache list is unaltered once the kernel is ready. + */ + + simple_lock(&kmem_cache_list_lock); + nr_caches = kmem_nr_caches; + simple_unlock(&kmem_cache_list_lock); + + if (nr_caches <= *infoCntp) + info = *infop; + else { + vm_offset_t info_addr; + + info_size = round_page(nr_caches * sizeof(*info)); + kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size); + + if (kr != KERN_SUCCESS) + return kr; + + info = (cache_info_t *)info_addr; + } + + if (info == NULL) + return KERN_RESOURCE_SHORTAGE; + + i = 0; + + list_for_each_entry(&kmem_cache_list, cache, node) { + simple_lock(&cache->lock); + info[i].flags = ((cache->flags & KMEM_CF_NO_CPU_POOL) + ? CACHE_FLAGS_NO_CPU_POOL : 0) + | ((cache->flags & KMEM_CF_SLAB_EXTERNAL) + ? CACHE_FLAGS_SLAB_EXTERNAL : 0) + | ((cache->flags & KMEM_CF_NO_RECLAIM) + ? CACHE_FLAGS_NO_RECLAIM : 0) + | ((cache->flags & KMEM_CF_VERIFY) + ? CACHE_FLAGS_VERIFY : 0) + | ((cache->flags & KMEM_CF_DIRECT) + ?
CACHE_FLAGS_DIRECT : 0); +#if SLAB_USE_CPU_POOLS + info[i].cpu_pool_size = cache->cpu_pool_type->array_size; +#else /* SLAB_USE_CPU_POOLS */ + info[i].cpu_pool_size = 0; +#endif /* SLAB_USE_CPU_POOLS */ + info[i].obj_size = cache->obj_size; + info[i].align = cache->align; + info[i].buf_size = cache->buf_size; + info[i].slab_size = cache->slab_size; + info[i].bufs_per_slab = cache->bufs_per_slab; + info[i].nr_objs = cache->nr_objs; + info[i].nr_bufs = cache->nr_bufs; + info[i].nr_slabs = cache->nr_slabs; + info[i].nr_free_slabs = cache->nr_free_slabs; + strncpy(info[i].name, cache->name, sizeof(info[i].name)); + info[i].name[sizeof(info[i].name) - 1] = '\0'; + simple_unlock(&cache->lock); + + i++; + } + + if (info != *infop) { + vm_map_copy_t copy; + vm_size_t used; + + used = nr_caches * sizeof(*info); + + if (used != info_size) + memset((char *)info + used, 0, info_size - used); + + kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t)info, used, TRUE, + &copy); + + assert(kr == KERN_SUCCESS); + *infop = (cache_info_t *)copy; + } + + *infoCntp = nr_caches; + + return KERN_SUCCESS; +} +#endif /* MACH_DEBUG */ -- cgit v1.2.3 From 4ae07e7e07cf77f1b7ce06bebf1057bfe4a16c54 Mon Sep 17 00:00:00 2001 From: Samuel Thibault Date: Fri, 9 Mar 2012 01:04:52 +0100 Subject: Use unsigned long for addresses and sizes TODO: push the format fixes upstream * i386/include/mach/i386/vm_types.h (vm_offset_t): Define to unsigned long. (signed32_t): Define to signed int. (unsigned32_t): Define to unsigned int. * i386/include/mach/sa/stdarg.h (__va_size): Use sizeof(unsigned long)-1 instead of 3. * include/mach/port.h (mach_port_t): Define to vm_offset_t instead of natural_t. * include/sys/types.h (size_t): Define to unsigned long instead of natural_t. * linux/src/include/asm-i386/posix_types.h (__kernel_size_t): Define to unsigned long. (__kernel_ssize_t): Define to long. * linux/src/include/linux/stddef.h (size_t): Define to unsigned long. * device/dev_pager.c (dev_pager_hash): Cast port to vm_offset_t instead of natural_t. (device_pager_data_request): Fix format. * device/ds_routines.c (ds_no_senders): Fix format. * i386/i386/io_map.c (io_map): Likewise. * i386/i386at/autoconf.c (take_dev_irq): Likewise. * i386/i386at/com.c (comattach): Likewise. * i386/i386at/lpr.c (lprattach): Likewise. * i386/i386at/model_dep.c (mem_size_init, mem_size_init, c_boot_entry): Likewise. * i386/intel/pmap.c (pmap_enter): Likewise. * ipc/ipc_notify.c (ipc_notify_port_deleted, ipc_notify_msg_accepted, ipc_notify_dead_name): Likewise. * ipc/mach_port.c (mach_port_destroy, mach_port_deallocate): Likewise. * kern/ipc_kobject.c (ipc_kobject_destroy): Likewise. * kern/slab.c (kalloc_init): Likewise. * vm/vm_fault.c (vm_fault_page): Likewise. * vm/vm_map.c (vm_map_pmap_enter): Likewise. * xen/block.c (device_read): Likewise. * device/net_io.c (bpf_match): Take unsigned long * instead of unsigned int *. (bpf_do_filter): Make mem unsigned long instead of long. * i386/i386/ktss.c (ktss_init): Cast pointer to unsigned long instead of unsigned. * i386/i386/pcb.c (stack_attach, switch_ktss): Cast pointers to long instead of int. * i386/i386/trap.c (dump_ss): Likewise. * ipc/ipc_hash.c (IH_LOCAL_HASH): Cast object to vm_offset_t. * ipc/mach_msg.c (mach_msg_receive, mach_msg_receive_continue): Cast kmsg to vm_offset_t instead of natural_t. * kern/pc_sample.c (take_pc_sample): Cast to vm_offset_t instead of natural_t. * kern/boot_script.c (sym, arg): Set type of `val' field to long instead of int.
(create_task, builtin_symbols, boot_script_parse_line, boot_script_define_function): Cast to long instead of int. * kern/bootstrap.c (bootstrap_create): Likewise. * kern/sched_prim.c (decl_simple_lock_data): Likewise. * kern/printf.c (vsnprintf): Set size type to size_t. * kern/printf.h (vsnprintf): Likewise. * vm/vm_map.h (kentry_data_size): Fix type to vm_size_t. * vm/vm_object.c (vm_object_pmap_protect_by_page): Fix size parameter type to vm_size_t. --- device/dev_pager.c | 4 ++-- device/ds_routines.c | 2 +- device/net_io.c | 6 +++--- i386/i386/io_map.c | 2 +- i386/i386/ktss.c | 2 +- i386/i386/pcb.c | 12 ++++++------ i386/i386/trap.c | 4 ++-- i386/i386at/autoconf.c | 2 +- i386/i386at/com.c | 2 +- i386/i386at/lpr.c | 2 +- i386/i386at/model_dep.c | 6 +++--- i386/include/mach/i386/vm_types.h | 6 +++--- i386/include/mach/sa/stdarg.h | 2 +- i386/intel/pmap.c | 2 +- include/mach/port.h | 2 +- include/sys/types.h | 2 +- ipc/ipc_hash.c | 2 +- ipc/ipc_notify.c | 6 +++--- ipc/mach_msg.c | 4 ++-- ipc/mach_port.c | 4 ++-- kern/boot_script.c | 21 +++++++++++---------- kern/bootstrap.c | 12 ++++++------ kern/ipc_kobject.c | 2 +- kern/pc_sample.c | 2 +- kern/printf.c | 2 +- kern/printf.h | 2 +- kern/sched_prim.c | 2 +- kern/slab.c | 2 +- linux/src/include/asm-i386/posix_types.h | 4 ++-- linux/src/include/linux/stddef.h | 2 +- vm/vm_fault.c | 2 +- vm/vm_map.c | 2 +- vm/vm_map.h | 2 +- vm/vm_object.c | 2 +- xen/block.c | 2 +- 35 files changed, 68 insertions(+), 67 deletions(-) (limited to 'kern/slab.c') diff --git a/device/dev_pager.c b/device/dev_pager.c index bc58a155..e0ca2c76 100644 --- a/device/dev_pager.c +++ b/device/dev_pager.c @@ -165,7 +165,7 @@ decl_simple_lock_data(, dev_pager_hash_lock) #define dev_pager_hash(name_port) \ - (((natural_t)(name_port) & 0xffffff) % DEV_PAGER_HASH_COUNT) + (((vm_offset_t)(name_port) & 0xffffff) % DEV_PAGER_HASH_COUNT) void dev_pager_hash_init(void) { @@ -336,7 +336,7 @@ kern_return_t device_pager_data_request( #endif /* lint */ if (device_pager_debug) - printf("(device_pager)data_request: pager=%p, offset=0x%x, length=0x%x\n", + printf("(device_pager)data_request: pager=%p, offset=0x%lx, length=0x%x\n", pager, offset, length); ds = dev_pager_hash_lookup((ipc_port_t)pager); diff --git a/device/ds_routines.c b/device/ds_routines.c index 5a6fdd2d..68589dee 100644 --- a/device/ds_routines.c +++ b/device/ds_routines.c @@ -1449,7 +1449,7 @@ static void ds_no_senders(notification) mach_no_senders_notification_t *notification; { - printf("ds_no_senders called! device_port=0x%x count=%d\n", + printf("ds_no_senders called! device_port=0x%lx count=%d\n", notification->not_header.msgh_remote_port, notification->not_count); } diff --git a/device/net_io.c b/device/net_io.c index 52a07163..4ebf9964 100644 --- a/device/net_io.c +++ b/device/net_io.c @@ -394,7 +394,7 @@ int net_add_q_info (ipc_port_t rcv_port); int bpf_match ( net_hash_header_t hash, int n_keys, - unsigned int *keys, + unsigned long *keys, net_hash_entry_t **hash_headpp, net_hash_entry_t *entpp); @@ -1638,7 +1638,7 @@ bpf_do_filter(infp, p, wirelen, header, hlen, hash_headpp, entpp) register unsigned long A, X; register int k; - long mem[BPF_MEMWORDS]; + unsigned long mem[BPF_MEMWORDS]; /* Generic pointer to either HEADER or P according to the specified offset. 
*/ char *data = NULL; @@ -2032,7 +2032,7 @@ int bpf_match (hash, n_keys, keys, hash_headpp, entpp) net_hash_header_t hash; register int n_keys; - register unsigned int *keys; + register unsigned long *keys; net_hash_entry_t **hash_headpp, *entpp; { register net_hash_entry_t head, entp; diff --git a/i386/i386/io_map.c b/i386/i386/io_map.c index 5b77552d..b095f224 100644 --- a/i386/i386/io_map.c +++ b/i386/i386/io_map.c @@ -49,7 +49,7 @@ io_map(phys_addr, size) */ start = kernel_virtual_start; kernel_virtual_start += round_page(size); - printf("stealing kernel virtual addresses %08x-%08x\n", start, kernel_virtual_start); + printf("stealing kernel virtual addresses %08lx-%08lx\n", start, kernel_virtual_start); } else { (void) kmem_alloc_pageable(kernel_map, &start, round_page(size)); diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c index 66432f3e..e2c44257 100644 --- a/i386/i386/ktss.c +++ b/i386/i386/ktss.c @@ -48,7 +48,7 @@ ktss_init() #ifdef MACH_XEN /* Xen won't allow us to do any I/O by default anyway, just register * exception stack */ - if (hyp_stack_switch(KERNEL_DS, (unsigned)(exception_stack+1024))) + if (hyp_stack_switch(KERNEL_DS, (unsigned long)(exception_stack+1024))) panic("couldn't register exception stack\n"); #else /* MACH_XEN */ /* Initialize the master TSS descriptor. */ diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c index e065dbb2..dfe0444f 100644 --- a/i386/i386/pcb.c +++ b/i386/i386/pcb.c @@ -93,10 +93,10 @@ void stack_attach(thread, stack, continuation) * This function will not return normally, * so we don`t have to worry about a return address. */ - STACK_IKS(stack)->k_eip = (int) Thread_continue; - STACK_IKS(stack)->k_ebx = (int) continuation; - STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack); - STACK_IKS(stack)->k_ebp = (int) 0; + STACK_IKS(stack)->k_eip = (long) Thread_continue; + STACK_IKS(stack)->k_ebx = (long) continuation; + STACK_IKS(stack)->k_esp = (long) STACK_IEL(stack); + STACK_IKS(stack)->k_ebp = (long) 0; /* * Point top of kernel stack to user`s registers. @@ -152,8 +152,8 @@ void switch_ktss(pcb) */ pcb_stack_top = (pcb->iss.efl & EFL_VM) - ? (int) (&pcb->iss + 1) - : (int) (&pcb->iss.v86_segs); + ? (long) (&pcb->iss + 1) + : (long) (&pcb->iss.v86_segs); #ifdef MACH_XEN /* No IO mask here */ diff --git a/i386/i386/trap.c b/i386/i386/trap.c index 01c83f50..d594907e 100644 --- a/i386/i386/trap.c +++ b/i386/i386/trap.c @@ -217,8 +217,8 @@ dump_ss(regs); printf("now %08x\n", subcode); #endif if (trunc_page(subcode) == 0 - || (subcode >= (int)_start - && subcode < (int)etext)) { + || (subcode >= (long)_start + && subcode < (long)etext)) { printf("Kernel page fault at address 0x%x, " "eip = 0x%x\n", subcode, regs->eip); diff --git a/i386/i386at/autoconf.c b/i386/i386at/autoconf.c index d1f2863f..93c71412 100644 --- a/i386/i386at/autoconf.c +++ b/i386/i386at/autoconf.c @@ -135,7 +135,7 @@ void take_dev_irq( printf("The device below will clobber IRQ %d.\n", pic); printf("You have two devices at the same IRQ.\n"); printf("This won't work. Reconfigure your hardware and try again.\n"); - printf("%s%d: port = %x, spl = %d, pic = %d.\n", + printf("%s%d: port = %lx, spl = %ld, pic = %d.\n", dev->name, dev->unit, dev->address, dev->sysdep, dev->sysdep1); while (1); diff --git a/i386/i386at/com.c b/i386/i386at/com.c index f02c7f83..165b0fab 100644 --- a/i386/i386at/com.c +++ b/i386/i386at/com.c @@ -229,7 +229,7 @@ comattach(struct bus_device *dev) u_short addr = dev->address; take_dev_irq(dev); - printf(", port = %x, spl = %d, pic = %d. 
(DOS COM%d)", + printf(", port = %lx, spl = %ld, pic = %d. (DOS COM%d)", dev->address, dev->sysdep, dev->sysdep1, unit+1); /* comcarrier[unit] = addr->flags;*/ diff --git a/i386/i386at/lpr.c b/i386/i386at/lpr.c index 468608c7..b69e813e 100644 --- a/i386/i386at/lpr.c +++ b/i386/i386at/lpr.c @@ -102,7 +102,7 @@ void lprattach(struct bus_device *dev) u_short addr = (u_short) dev->address; take_dev_irq(dev); - printf(", port = %x, spl = %d, pic = %d.", + printf(", port = %lx, spl = %ld, pic = %d.", dev->address, dev->sysdep, dev->sysdep1); lprinfo[unit] = dev; diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c index 980708cf..2f785207 100644 --- a/i386/i386at/model_dep.c +++ b/i386/i386at/model_dep.c @@ -284,7 +284,7 @@ mem_size_init(void) phys_last_addr = phys_last_kb * 0x400; #endif /* MACH_HYP */ - printf("AT386 boot: physical memory from 0x%x to 0x%x\n", + printf("AT386 boot: physical memory from 0x%lx to 0x%lx\n", phys_first_addr, phys_last_addr); /* Reserve room for virtual mappings. @@ -292,7 +292,7 @@ mem_size_init(void) max_phys_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS - VM_KERNEL_MAP_SIZE; if (phys_last_addr - phys_first_addr > max_phys_size) { phys_last_addr = phys_first_addr + max_phys_size; - printf("Truncating memory size to %dMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024)); + printf("Truncating memory size to %luMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024)); /* TODO Xen: be nice, free lost memory */ } @@ -514,7 +514,7 @@ void c_boot_entry(vm_offset_t bi) strtab_size = (vm_offset_t)phystokv(boot_info.syms.a.strsize); kern_sym_end = kern_sym_start + 4 + symtab_size + strtab_size; - printf("kernel symbol table at %08x-%08x (%d,%d)\n", + printf("kernel symbol table at %08lx-%08lx (%d,%d)\n", kern_sym_start, kern_sym_end, symtab_size, strtab_size); } diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h index d54008ef..1439940b 100644 --- a/i386/include/mach/i386/vm_types.h +++ b/i386/include/mach/i386/vm_types.h @@ -73,7 +73,7 @@ typedef unsigned int uint32; * A vm_offset_t is a type-neutral pointer, * e.g. an offset into a virtual memory space. 
*/ -typedef natural_t vm_offset_t; +typedef unsigned long vm_offset_t; typedef vm_offset_t * vm_offset_array_t; /* @@ -88,11 +88,11 @@ typedef natural_t vm_size_t; */ typedef signed char signed8_t; typedef signed short signed16_t; -typedef signed long signed32_t; +typedef signed int signed32_t; typedef signed long long signed64_t; typedef unsigned char unsigned8_t; typedef unsigned short unsigned16_t; -typedef unsigned long unsigned32_t; +typedef unsigned int unsigned32_t; typedef unsigned long long unsigned64_t; typedef float float32_t; typedef double float64_t; diff --git a/i386/include/mach/sa/stdarg.h b/i386/include/mach/sa/stdarg.h index ba0f78a1..550fec4f 100644 --- a/i386/include/mach/sa/stdarg.h +++ b/i386/include/mach/sa/stdarg.h @@ -39,7 +39,7 @@ typedef __builtin_va_list va_list; #else -#define __va_size(type) ((sizeof(type)+3) & ~0x3) +#define __va_size(type) ((sizeof(type)+sizeof(unsigned long)-1) & ~(sizeof(unsigned long)-1)) #ifndef _VA_LIST_ #define _VA_LIST_ diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c index d6e18e50..86d2415d 100644 --- a/i386/intel/pmap.c +++ b/i386/intel/pmap.c @@ -1747,7 +1747,7 @@ void pmap_enter(pmap, v, pa, prot, wired) vm_offset_t old_pa; assert(pa != vm_page_fictitious_addr); -if (pmap_debug) printf("pmap(%x, %x)\n", v, pa); +if (pmap_debug) printf("pmap(%lx, %lx)\n", v, pa); if (pmap == PMAP_NULL) return; diff --git a/include/mach/port.h b/include/mach/port.h index 6dafb2f3..53f60716 100644 --- a/include/mach/port.h +++ b/include/mach/port.h @@ -39,7 +39,7 @@ #include -typedef natural_t mach_port_t; +typedef vm_offset_t mach_port_t; typedef mach_port_t *mach_port_array_t; typedef int *rpc_signature_info_t; diff --git a/include/sys/types.h b/include/sys/types.h index d79e077c..19e7b242 100644 --- a/include/sys/types.h +++ b/include/sys/types.h @@ -30,7 +30,7 @@ #ifndef _SIZE_T #define _SIZE_T -typedef natural_t size_t; +typedef unsigned long size_t; #endif #ifndef _SSIZE_T diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c index ad05016c..5eec58cb 100644 --- a/ipc/ipc_hash.c +++ b/ipc/ipc_hash.c @@ -326,7 +326,7 @@ ipc_hash_global_delete( */ #define IH_LOCAL_HASH(obj, size) \ - ((((mach_port_index_t) (obj)) >> 6) % (size)) + ((((mach_port_index_t) (vm_offset_t) (obj)) >> 6) % (size)) /* * Routine: ipc_hash_local_lookup diff --git a/ipc/ipc_notify.c b/ipc/ipc_notify.c index d06346ea..25fa421b 100644 --- a/ipc/ipc_notify.c +++ b/ipc/ipc_notify.c @@ -264,7 +264,7 @@ ipc_notify_port_deleted(port, name) kmsg = ikm_alloc(sizeof *n); if (kmsg == IKM_NULL) { - printf("dropped port-deleted (0x%p, 0x%x)\n", port, name); + printf("dropped port-deleted (0x%p, 0x%lx)\n", port, name); ipc_port_release_sonce(port); return; } @@ -298,7 +298,7 @@ ipc_notify_msg_accepted(port, name) kmsg = ikm_alloc(sizeof *n); if (kmsg == IKM_NULL) { - printf("dropped msg-accepted (0x%p, 0x%x)\n", port, name); + printf("dropped msg-accepted (0x%p, 0x%lx)\n", port, name); ipc_port_release_sonce(port); return; } @@ -437,7 +437,7 @@ ipc_notify_dead_name(port, name) kmsg = ikm_alloc(sizeof *n); if (kmsg == IKM_NULL) { - printf("dropped dead-name (0x%p, 0x%x)\n", port, name); + printf("dropped dead-name (0x%p, 0x%lx)\n", port, name); ipc_port_release_sonce(port); return; } diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c index 43ae918a..00ab085b 100644 --- a/ipc/mach_msg.c +++ b/ipc/mach_msg.c @@ -218,7 +218,7 @@ mach_msg_receive(msg, option, rcv_size, rcv_name, time_out, notify) if (mr != MACH_MSG_SUCCESS) { if (mr == MACH_RCV_TOO_LARGE) { mach_msg_size_t real_size = - (mach_msg_size_t) 
(natural_t) kmsg; + (mach_msg_size_t) (vm_offset_t) kmsg; assert(real_size > rcv_size); @@ -309,7 +309,7 @@ mach_msg_receive_continue(void) if (mr != MACH_MSG_SUCCESS) { if (mr == MACH_RCV_TOO_LARGE) { mach_msg_size_t real_size = - (mach_msg_size_t) (natural_t) kmsg; + (mach_msg_size_t) (vm_offset_t) kmsg; assert(real_size > rcv_size); diff --git a/ipc/mach_port.c b/ipc/mach_port.c index c5688c90..d0310b55 100644 --- a/ipc/mach_port.c +++ b/ipc/mach_port.c @@ -571,7 +571,7 @@ mach_port_destroy( kr = ipc_right_lookup_write(space, name, &entry); if (kr != KERN_SUCCESS) { if (name != MACH_PORT_NULL && name != MACH_PORT_DEAD && space == current_space()) { - printf("task %p destroying an invalid port %u, most probably a bug.\n", current_task(), name); + printf("task %p destroying an invalid port %lu, most probably a bug.\n", current_task(), name); if (mach_port_deallocate_debug) SoftDebugger("mach_port_deallocate"); } @@ -615,7 +615,7 @@ mach_port_deallocate( kr = ipc_right_lookup_write(space, name, &entry); if (kr != KERN_SUCCESS) { if (name != MACH_PORT_NULL && name != MACH_PORT_DEAD && space == current_space()) { - printf("task %p deallocating an invalid port %u, most probably a bug.\n", current_task(), name); + printf("task %p deallocating an invalid port %lu, most probably a bug.\n", current_task(), name); if (mach_port_deallocate_debug) SoftDebugger("mach_port_deallocate"); } diff --git a/kern/boot_script.c b/kern/boot_script.c index 93491267..aa6833bb 100644 --- a/kern/boot_script.c +++ b/kern/boot_script.c @@ -17,7 +17,7 @@ struct sym int type; /* Symbol value. */ - int val; + long val; /* For function symbols; type of value returned by function. */ int ret_type; @@ -44,7 +44,7 @@ struct arg int type; /* Argument value. */ - int val; + long val; }; /* List of commands. */ @@ -70,7 +70,7 @@ static int create_task (struct cmd *cmd, int *val) { int err = boot_script_task_create (cmd); - *val = (int) cmd->task; + *val = (long) cmd->task; return err; } @@ -91,9 +91,9 @@ prompt_resume_task (struct cmd *cmd, int *val) /* List of builtin symbols. */ static struct sym builtin_symbols[] = { - { "task-create", VAL_FUNC, (int) create_task, VAL_TASK, 0 }, - { "task-resume", VAL_FUNC, (int) resume_task, VAL_NONE, 1 }, - { "prompt-task-resume", VAL_FUNC, (int) prompt_resume_task, VAL_NONE, 1 }, + { "task-create", VAL_FUNC, (long) create_task, VAL_TASK, 0 }, + { "task-resume", VAL_FUNC, (long) resume_task, VAL_NONE, 1 }, + { "prompt-task-resume", VAL_FUNC, (long) prompt_resume_task, VAL_NONE, 1 }, }; #define NUM_BUILTIN (sizeof (builtin_symbols) / sizeof (builtin_symbols[0])) @@ -294,7 +294,8 @@ boot_script_parse_line (void *hook, char *cmdline) for (p += 2;;) { char c; - int i, val, type; + int i, type; + long val; struct sym *s; /* Parse symbol name. */ @@ -349,7 +350,7 @@ boot_script_parse_line (void *hook, char *cmdline) if (! 
 		    {
 		      (error
-		       = ((*((int (*) (struct cmd *, int *)) s->val))
+		       = ((*((int (*) (struct cmd *, long *)) s->val))
 			  (cmd, &val)));
 		      if (error)
 			goto bad;
@@ -371,7 +372,7 @@
 	  else if (s->type == VAL_NONE)
 	    {
 	      type = VAL_SYM;
-	      val = (int) s;
+	      val = (long) s;
 	    }
 	  else
 	    {
@@ -681,7 +682,7 @@ boot_script_define_function (const char *name, int ret_type,
   if (sym)
     {
       sym->type = VAL_FUNC;
-      sym->val = (int) func;
+      sym->val = (long) func;
       sym->ret_type = ret_type;
       sym->run_on_exec = ret_type == VAL_NONE;
     }
diff --git a/kern/bootstrap.c b/kern/bootstrap.c
index 68f40b4f..2b74073a 100644
--- a/kern/bootstrap.c
+++ b/kern/bootstrap.c
@@ -154,19 +154,19 @@ void bootstrap_create()
   /* Initialize boot script variables.  We leak these send rights.  */
   losers = boot_script_set_variable
     ("host-port", VAL_PORT,
-     (int)ipc_port_make_send(realhost.host_priv_self));
+     (long)ipc_port_make_send(realhost.host_priv_self));
   if (losers)
     panic ("cannot set boot-script variable host-port: %s",
 	   boot_script_error_string (losers));
   losers = boot_script_set_variable
     ("device-port", VAL_PORT,
-     (int) ipc_port_make_send(master_device_port));
+     (long) ipc_port_make_send(master_device_port));
   if (losers)
     panic ("cannot set boot-script variable device-port: %s",
 	   boot_script_error_string (losers));
   losers = boot_script_set_variable
     ("kernel-command-line", VAL_STR,
-     (int) kernel_cmdline);
+     (long) kernel_cmdline);
   if (losers)
     panic ("cannot set boot-script variable %s: %s",
 	   "kernel-command-line", boot_script_error_string (losers));
@@ -185,12 +185,12 @@ void bootstrap_create()
       get_compat_strings(flag_string, root_string);
 
       losers = boot_script_set_variable ("boot-args", VAL_STR,
-					 (int) flag_string);
+					 (long) flag_string);
       if (losers)
 	panic ("cannot set boot-script variable %s: %s",
 	       "boot-args", boot_script_error_string (losers));
       losers = boot_script_set_variable ("root-device", VAL_STR,
-					 (int) root_string);
+					 (long) root_string);
       if (losers)
 	panic ("cannot set boot-script variable %s: %s",
 	       "root-device", boot_script_error_string (losers));
@@ -232,7 +232,7 @@ void bootstrap_create()
 	  if (eq == 0)
 	    continue;
 	  *eq++ = '\0';
-	  losers = boot_script_set_variable (word, VAL_STR, (int) eq);
+	  losers = boot_script_set_variable (word, VAL_STR, (long) eq);
 	  if (losers)
 	    panic ("cannot set boot-script variable %s: %s",
 		   word, boot_script_error_string (losers));
diff --git a/kern/ipc_kobject.c b/kern/ipc_kobject.c
index bd171a7e..c922d7fa 100644
--- a/kern/ipc_kobject.c
+++ b/kern/ipc_kobject.c
@@ -319,7 +319,7 @@ ipc_kobject_destroy(
 	    default:
 #if	MACH_ASSERT
-		printf("ipc_kobject_destroy: port 0x%p, kobj 0x%x, type %d\n",
+		printf("ipc_kobject_destroy: port 0x%p, kobj 0x%lx, type %d\n",
 		       port, port->ip_kobject, ip_kotype(port));
 #endif	/* MACH_ASSERT */
 		break;
diff --git a/kern/pc_sample.c b/kern/pc_sample.c
index 2cec907b..57002581 100644
--- a/kern/pc_sample.c
+++ b/kern/pc_sample.c
@@ -57,7 +57,7 @@ void take_pc_sample(
     pc = interrupted_pc(t);
     cp->seqno++;
     sample = &((sampled_pc_t *)cp->buffer)[cp->seqno % MAX_PC_SAMPLES];
-    sample->id = (natural_t)t;
+    sample->id = (vm_offset_t)t;
     sample->pc = pc;
     sample->sampletype = flavor;
 }
diff --git a/kern/printf.c b/kern/printf.c
index 88a527ba..658493cc 100644
--- a/kern/printf.c
+++ b/kern/printf.c
@@ -601,7 +601,7 @@ snputc(char c, vm_offset_t arg)
 }
 
 int
-vsnprintf(char *buf, int size, const char *fmt, va_list args)
+vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
     struct vsnprintf_cookie cookie
       = { .buf = buf, .index = 0, .max_len = size };
diff --git a/kern/printf.h b/kern/printf.h
index 22681116..fcf2b3b0 100644
--- a/kern/printf.h
+++ b/kern/printf.h
@@ -40,7 +40,7 @@ extern void printnum (unsigned long u, int base, vm_offset_t putc_arg);
 
 extern int sprintf (char *buf, const char *fmt, ...);
 
-extern int vsnprintf (char *buf, int size, const char *fmt, va_list args);
+extern int vsnprintf (char *buf, size_t size, const char *fmt, va_list args);
 
 extern int printf (const char *fmt, ...);
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index 9d4e8afa..a7b7a4ee 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -146,7 +146,7 @@ decl_simple_lock_data(, wait_lock[NUMQUEUES])
 
 /* NOTE: we want a small positive integer out of this */
 #define wait_hash(event) \
-	((((int)(event) < 0) ? ~(int)(event) : (int)(event)) % NUMQUEUES)
+	((((long)(event) < 0) ? ~(long)(event) : (long)(event)) % NUMQUEUES)
 
 void wait_queue_init(void)
 {
diff --git a/kern/slab.c b/kern/slab.c
index 38413e83..f95ec0bb 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -1381,7 +1381,7 @@ void kalloc_init(void)
     size = 1 << KALLOC_FIRST_SHIFT;
 
     for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
-        sprintf(name, "kalloc_%u", size);
+        sprintf(name, "kalloc_%lu", size);
         kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
                         kalloc_pagealloc, kalloc_pagefree, 0);
         size <<= 1;
diff --git a/linux/src/include/asm-i386/posix_types.h b/linux/src/include/asm-i386/posix_types.h
index 712ef70c..6a04605a 100644
--- a/linux/src/include/asm-i386/posix_types.h
+++ b/linux/src/include/asm-i386/posix_types.h
@@ -15,8 +15,8 @@ typedef long __kernel_off_t;
 typedef int __kernel_pid_t;
 typedef unsigned short __kernel_uid_t;
 typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
 typedef int __kernel_ptrdiff_t;
 typedef long __kernel_time_t;
 typedef long __kernel_clock_t;
diff --git a/linux/src/include/linux/stddef.h b/linux/src/include/linux/stddef.h
index c6221e71..488d49c0 100644
--- a/linux/src/include/linux/stddef.h
+++ b/linux/src/include/linux/stddef.h
@@ -3,7 +3,7 @@
 
 #ifndef _SIZE_T
 #define _SIZE_T
-typedef unsigned int size_t;
+typedef unsigned long size_t;
 #endif
 
 #undef NULL
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 10955edd..178f3072 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -657,7 +657,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
 			m->offset + object->paging_offset,
 			PAGE_SIZE, access_required)) != KERN_SUCCESS) {
 		if (rc != MACH_SEND_INTERRUPTED)
-			printf("%s(0x%p, 0x%p, 0x%x, 0x%x, 0x%x) failed, %x\n",
+			printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
 				"memory_object_data_request",
 				object->pager,
 				object->pager_request,
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 1f062757..8012bcf2 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -711,7 +711,7 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
 	if (vm_map_pmap_enter_print) {
 	    printf("vm_map_pmap_enter:");
-	    printf("map: %p, addr: %x, object: %p, offset: %x\n",
+	    printf("map: %p, addr: %lx, object: %p, offset: %lx\n",
 		   map, addr, object, offset);
 	}
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 17de5db4..381c7cfd 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -354,7 +354,7 @@ MACRO_END
  */
 extern vm_offset_t	kentry_data;
-extern vm_offset_t	kentry_data_size;
+extern vm_size_t	kentry_data_size;
 extern int		kentry_count;
 
 /* Initialize the module */
 extern void		vm_map_init(void);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index d80124aa..f1017086 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -919,7 +919,7 @@ boolean_t vm_object_pmap_protect_by_page = FALSE;
 void vm_object_pmap_protect(
 	register vm_object_t	object,
 	register vm_offset_t	offset,
-	vm_offset_t		size,
+	vm_size_t		size,
 	pmap_t			pmap,
 	vm_offset_t		pmap_start,
 	vm_prot_t		prot)
diff --git a/xen/block.c b/xen/block.c
index fb18b67e..4253ef04 100644
--- a/xen/block.c
+++ b/xen/block.c
@@ -517,7 +517,7 @@ device_read (void *d, ipc_port_t reply_port,
 	thread_block(NULL);
 
 	if (err)
-		printf("error reading %d bytes at sector %d\n", amt,
+		printf("error reading %d bytes at sector %ld\n", amt,
 			bn + offset / 512);
 
 	for (i = 0; i < nbpages; i++)
-- cgit v1.2.3

From d518d34ce0794da016d223a6a4f1b5a69825ede8 Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Sun, 22 Apr 2012 17:19:54 +0200
Subject: Fix copyright assignment

Maksym and I have assigned copyright to the Free Software Foundation.
In addition, restore the original upstream copyrights for correctness.

* kern/list.h: Fix copyright assignment.
* kern/rbtree.c: Likewise.
* kern/rbtree.h: Likewise.
* kern/rbtree_i.h: Likewise.
* kern/slab.c: Likewise.
* kern/slab.h: Likewise.
---
 kern/list.h     | 32 +++++++++++++++++++++-----------
 kern/rbtree.c   | 31 +++++++++++++++++++------------
 kern/rbtree.h   | 32 +++++++++++++++++++++-----------
 kern/rbtree_i.h | 31 +++++++++++++++++++------------
 kern/slab.c     | 27 +++++++++++++++++++++++++--
 kern/slab.h     | 31 +++++++++++++++++++++++++++++--
 6 files changed, 134 insertions(+), 50 deletions(-)

(limited to 'kern/slab.c')

diff --git a/kern/list.h b/kern/list.h
index de7d1158..03414718 100644
--- a/kern/list.h
+++ b/kern/list.h
@@ -1,19 +1,29 @@
 /*
  * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Simple doubly-linked list.
  */
 
 #ifndef _KERN_LIST_H
diff --git a/kern/rbtree.c b/kern/rbtree.c
index 1c04c5c1..f6b89bc4 100644
--- a/kern/rbtree.c
+++ b/kern/rbtree.c
@@ -1,19 +1,26 @@
 /*
  * Copyright (c) 2010 Richard Braun.
+ * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include
diff --git a/kern/rbtree.h b/kern/rbtree.h
index b6d62bfe..c75e6858 100644
--- a/kern/rbtree.h
+++ b/kern/rbtree.h
@@ -1,19 +1,29 @@
 /*
  * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Red-black tree.
  */
 
 #ifndef _KERN_RBTREE_H
diff --git a/kern/rbtree_i.h b/kern/rbtree_i.h
index 9befc92c..69dfb9d0 100644
--- a/kern/rbtree_i.h
+++ b/kern/rbtree_i.h
@@ -1,19 +1,26 @@
 /*
  * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _KERN_RBTREE_I_H
diff --git a/kern/slab.c b/kern/slab.c
index f95ec0bb..5a9cbea1 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -1,6 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, 2011 Richard Braun.
- * Copyright (c) 2011 Maksym Planeta.
+ * Copyright (c) 2011 Free Software Foundation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,6 +17,30 @@
  */
 
 /*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
 * Object caching and general purpose memory allocator.
 *
 * This allocator is based on the paper "The Slab Allocator: An Object-Caching
diff --git a/kern/slab.h b/kern/slab.h
index 14c820b9..6abe2dc0 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -1,6 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, 2011 Richard Braun.
- * Copyright (c) 2011 Maksym Planeta.
+ * Copyright (c) 2011 Free Software Foundation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,6 +16,34 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching memory allocator.
+ */
+
 #ifndef _KERN_SLAB_H
 #define _KERN_SLAB_H
-- cgit v1.2.3

From 2d2da4aad582bf1d63554cd9496285787db3e66e Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Sat, 7 Jul 2012 07:42:24 +0000
Subject: Merge kalloc_map into kmem_map

* ipc/ipc_table.c: Add #include.
(ipc_table_alloc): Use kmem_map instead of kalloc_map when allocating
a table.
(ipc_table_realloc): Likewise for reallocation.
(ipc_table_free): Likewise for release.
* kern/kalloc.h (kalloc_map): Remove declaration.
* kern/slab.c (KMEM_MAP_SIZE): Increase to 128 MiB.
(KALLOC_MAP_SIZE): Remove macro.
(kalloc_map_store): Remove variable.
(kalloc_map): Likewise.
(kalloc_pagealloc): Use kmem_map instead of kalloc_map for general
purpose allocations.
(kalloc_pagefree): Likewise.
(kalloc_init): Remove the creation of kalloc_map.
---
 ipc/ipc_table.c |  7 ++++---
 kern/kalloc.h   |  2 --
 kern/slab.c     | 21 ++++-----------------
 3 files changed, 8 insertions(+), 22 deletions(-)

(limited to 'kern/slab.c')

diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
index d5b79045..cbb6a894 100644
--- a/ipc/ipc_table.c
+++ b/ipc/ipc_table.c
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /*
@@ -144,7 +145,7 @@ ipc_table_alloc(
 	if (size < PAGE_SIZE)
 		table = kalloc(size);
 	else
-		if (kmem_alloc(kalloc_map, &table, size) != KERN_SUCCESS)
+		if (kmem_alloc(kmem_map, &table, size) != KERN_SUCCESS)
 			table = 0;
 
 	return table;
@@ -170,7 +171,7 @@ ipc_table_realloc(
 {
 	vm_offset_t new_table;
 
-	if (kmem_realloc(kalloc_map, old_table, old_size,
+	if (kmem_realloc(kmem_map, old_table, old_size,
 			 &new_table, new_size) != KERN_SUCCESS)
 		new_table = 0;
 
@@ -194,5 +195,5 @@ ipc_table_free(
 	if (size < PAGE_SIZE)
 		kfree(table, size);
 	else
-		kmem_free(kalloc_map, table, size);
+		kmem_free(kmem_map, table, size);
 }
diff --git a/kern/kalloc.h b/kern/kalloc.h
index 1330b546..004e3a6b 100644
--- a/kern/kalloc.h
+++ b/kern/kalloc.h
@@ -30,8 +30,6 @@
 #include
 #include
 
-extern vm_map_t kalloc_map;
-
 extern vm_offset_t kalloc (vm_size_t size);
 
 extern void kfree (vm_offset_t data, vm_size_t size);
diff --git a/kern/slab.c b/kern/slab.c
index 5a9cbea1..12e4ff49 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -165,7 +165,7 @@
 /*
  * Size of the VM submap from which default backend functions allocate.
  */
-#define KMEM_MAP_SIZE (64 * 1024 * 1024)
+#define KMEM_MAP_SIZE (128 * 1024 * 1024)
 
 /*
  * Shift for the first kalloc cache size.
@@ -177,11 +177,6 @@
  */
 #define KALLOC_NR_CACHES 13
 
-/*
- * Size of the VM submap for general purpose allocations.
- */
-#define KALLOC_MAP_SIZE (64 * 1024 * 1024)
-
 /*
  * Values the buftag state member can take.
  */
@@ -283,17 +278,11 @@ static unsigned int kmem_nr_caches;
 static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
 
 /*
- * VM submap for slab caches (except general purpose allocations).
+ * VM submap for slab caches.
  */
 static struct vm_map kmem_map_store;
 vm_map_t kmem_map = &kmem_map_store;
 
-/*
- * VM submap for general purpose allocations.
- */
-static struct vm_map kalloc_map_store;
-vm_map_t kalloc_map = &kalloc_map_store;
-
 /*
  * Time of the last memory reclaim, in clock ticks.
  */
@@ -1380,7 +1369,7 @@ static vm_offset_t kalloc_pagealloc(vm_size_t size)
     vm_offset_t addr;
     kern_return_t kr;
 
-    kr = kmem_alloc_wired(kalloc_map, &addr, size);
+    kr = kmem_alloc_wired(kmem_map, &addr, size);
 
     if (kr != KERN_SUCCESS)
         return 0;
@@ -1390,7 +1379,7 @@ static vm_offset_t kalloc_pagealloc(vm_size_t size)
 
 static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
 {
-    kmem_free(kalloc_map, ptr, size);
+    kmem_free(kmem_map, ptr, size);
 }
 
 void kalloc_init(void)
@@ -1399,8 +1388,6 @@ void kalloc_init(void)
     size_t i, size;
     vm_offset_t min, max;
 
-    kmem_submap(kalloc_map, kernel_map, &min, &max, KALLOC_MAP_SIZE, FALSE);
-
     size = 1 << KALLOC_FIRST_SHIFT;
 
     for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
-- cgit v1.2.3

From 27150b21c3366d6b474fe3e69140ecac65ab7ed2 Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Sat, 7 Jul 2012 23:59:42 +0000
Subject: Increase the slab collection interval

* kern/slab.c (KMEM_GC_INTERVAL): Increase to 5 seconds.
---
 kern/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kern/slab.c')

diff --git a/kern/slab.c b/kern/slab.c
index 12e4ff49..ea38eb0c 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -132,7 +132,7 @@
 /*
  * Time (in seconds) between two garbage collection operations.
  */
-#define KMEM_GC_INTERVAL (1 * hz)
+#define KMEM_GC_INTERVAL (5 * hz)
 
 /*
  * The transfer size of a CPU pool is computed by dividing the pool size by
-- cgit v1.2.3

From 8d219eab0dcfbdcf464340630d568c4e16d7acbd Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Sun, 8 Jul 2012 00:31:53 +0000
Subject: Fix slab collection timing

The slab garbage collector uses sched_tick as its time reference, which
is incremented only once per second, while the interval is expressed in
clock ticks. Use the proper time reference instead.

* kern/slab.c (kmem_gc_last_tick): Declare as unsigned long.
(slab_collect): Use elapsed_ticks instead of sched_tick.
---
 kern/slab.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'kern/slab.c')

diff --git a/kern/slab.c b/kern/slab.c
index ea38eb0c..64f1fa8a 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -130,7 +130,7 @@
 #define KMEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8)
 
 /*
- * Time (in seconds) between two garbage collection operations.
+ * Time (in ticks) between two garbage collection operations.
  */
 #define KMEM_GC_INTERVAL (5 * hz)
 
@@ -286,7 +286,7 @@ vm_map_t kmem_map = &kmem_map_store;
 /*
  * Time of the last memory reclaim, in clock ticks.
  */
-static unsigned int kmem_gc_last_tick;
+static unsigned long kmem_gc_last_tick;
 
 #define kmem_error(format, ...)                         \
     printf("mem: error: %s(): " format "\n", __func__, \
@@ -1312,10 +1312,10 @@ void slab_collect(void)
 {
     struct kmem_cache *cache;
 
-    if (sched_tick <= (kmem_gc_last_tick + KMEM_GC_INTERVAL))
+    if (elapsed_ticks <= (kmem_gc_last_tick + KMEM_GC_INTERVAL))
         return;
 
-    kmem_gc_last_tick = sched_tick;
+    kmem_gc_last_tick = elapsed_ticks;
 
     simple_lock(&mem_cache_list_lock);
-- cgit v1.2.3

From dd961d1cef715b4c1e4fedd2f43fae6703f128ba Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Tue, 8 Jan 2013 00:05:48 +0100
Subject: Add function to dump a raw summary of the slab allocator state

The purpose of this function is to allow kernel code to display the
state of the slab caches in situations where the host_slab_info RPC
wouldn't be available, e.g. before a panic.

* kern/slab.c (slab_info): New function.
* kern/slab.h: Add declaration for slab_info.
---
 kern/slab.c | 29 +++++++++++++++++++++++++++++
 kern/slab.h |  5 +++++
 2 files changed, 34 insertions(+)

(limited to 'kern/slab.c')

diff --git a/kern/slab.c b/kern/slab.c
index 64f1fa8a..2923f47d 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -1490,6 +1490,35 @@ void kfree(vm_offset_t data, vm_size_t size)
     }
 }
 
+void slab_info(void)
+{
+    struct kmem_cache *cache;
+    vm_size_t mem_usage, mem_reclaimable;
+
+    printf("cache                  obj slab  bufs   objs   bufs "
+           "   total reclaimable\n"
+           "name                  size size /slab  usage  count "
+           "  memory      memory\n");
+
+    simple_lock(&kmem_cache_list_lock);
+
+    list_for_each_entry(&kmem_cache_list, cache, node) {
+        simple_lock(&cache->lock);
+
+        mem_usage = (cache->nr_slabs * cache->slab_size) >> 10;
+        mem_reclaimable = (cache->nr_free_slabs * cache->slab_size) >> 10;
+
+        printf("%-19s %6lu %3luk %4lu %6lu %6lu %7luk %10luk\n",
+               cache->name, cache->obj_size, cache->slab_size >> 10,
+               cache->bufs_per_slab, cache->nr_objs, cache->nr_bufs,
+               mem_usage, mem_reclaimable);
+
+        simple_unlock(&cache->lock);
+    }
+
+    simple_unlock(&kmem_cache_list_lock);
+}
+
 #if MACH_DEBUG
 kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
                              unsigned int *infoCntp)
diff --git a/kern/slab.h b/kern/slab.h
index 6abe2dc0..47bef218 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -246,4 +246,9 @@ void slab_init(void);
  */
 void slab_collect(void);
 
+/*
+ * Display a summary of all kernel caches.
+ */
+void slab_info(void);
+
 #endif /* _KERN_SLAB_H */
-- cgit v1.2.3

From 64cd914cf46ce29d032a155bde3265ff5339cc61 Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Tue, 8 Jan 2013 00:17:02 +0100
Subject: Fix slab cache list locking

This problem was overlooked because of simple locks being no-ops.

* kern/slab.c (slab_collect): Fix lock name to kmem_cache_list_lock.
(host_slab_info): Likewise.
---
 kern/slab.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'kern/slab.c')

diff --git a/kern/slab.c b/kern/slab.c
index 2923f47d..99d5bca3 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -1317,12 +1317,12 @@ void slab_collect(void)
 
     kmem_gc_last_tick = elapsed_ticks;
 
-    simple_lock(&mem_cache_list_lock);
+    simple_lock(&kmem_cache_list_lock);
 
     list_for_each_entry(&kmem_cache_list, cache, node)
         kmem_cache_reap(cache);
 
-    simple_unlock(&mem_cache_list_lock);
+    simple_unlock(&kmem_cache_list_lock);
 }
 
 void slab_bootstrap(void)
@@ -1536,9 +1536,9 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
      * Assume the cache list is unaltered once the kernel is ready.
      */
 
-    simple_lock(&mem_cache_list_lock);
+    simple_lock(&kmem_cache_list_lock);
     nr_caches = kmem_nr_caches;
-    simple_unlock(&mem_cache_list_lock);
+    simple_unlock(&kmem_cache_list_lock);
 
     if (nr_caches <= *infoCntp)
         info = *infop;
-- cgit v1.2.3
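
A note on the 64-bit type-width commit above: on LP64 targets, int stays
32 bits while long and pointers widen to 64 bits, so round-tripping a
pointer through int (as the boot script code did) truncates it, and
printing a long-sized value with %x or %u misreads the argument list.
The following stand-alone C sketch is illustrative only, not part of any
patch, and assumes an LP64 target; it also restates the word-size
rounding idea behind the __va_size fix:

#include <stdio.h>

/* Word-size rounding, as in the stdarg.h fix: round each argument's
   size up to a multiple of the word size instead of a hard-coded 4. */
#define VA_SIZE(type) \
    ((sizeof(type) + sizeof(unsigned long) - 1) & ~(sizeof(unsigned long) - 1))

int main(void)
{
    void *p = &p;

    /* Pointer round-trips: casting through long is lossless on LP64,
       while narrowing to int may discard the upper 32 bits. */
    long preserved = (long) p;
    int  truncated = (int) (long) p;

    /* Format mismatches: %x consumes only 32 bits of a long argument,
       so a 64-bit value needs %lx. */
    unsigned long v = 0x1234567890UL;   /* does not fit in 32 bits */
    printf("full value: %lx, low half only: %x\n", v, (unsigned) v);
    printf("preserved: %lx, truncated: %x\n", preserved, (unsigned) truncated);

    /* A char argument still occupies a full word on the va_list. */
    printf("VA_SIZE(char) = %zu, VA_SIZE(long long) = %zu\n",
           VA_SIZE(char), VA_SIZE(long long));
    return 0;
}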
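
The two collection-timing commits turn on comparing like units:
KMEM_GC_INTERVAL is 5 * hz, a value in clock ticks, so the check must use
elapsed_ticks, which advances once per tick, rather than sched_tick,
which advances only once per second. A minimal user-space model of the
corrected check follows; the names and the hz value are assumptions for
illustration, not kernel code:

#include <stdio.h>

static unsigned long hz = 100;      /* clock ticks per second (assumed) */
static unsigned long elapsed_ticks; /* advances once per clock tick */
static unsigned long gc_last_tick;  /* tick of the last collection */

#define GC_INTERVAL (5 * hz)        /* five seconds, expressed in ticks */

/* Return 1 when a collection is due.  Both operands are in ticks, which
   is what the sched_tick-based version got wrong: it compared a
   once-per-second counter against a tick-denominated interval. */
static int gc_due(void)
{
    if (elapsed_ticks <= gc_last_tick + GC_INTERVAL)
        return 0;
    gc_last_tick = elapsed_ticks;
    return 1;
}

int main(void)
{
    /* Simulate 20 seconds of ticks; collections fire every ~5 seconds. */
    for (elapsed_ticks = 0; elapsed_ticks < 2000; elapsed_ticks++)
        if (gc_due())
            printf("collect at tick %lu\n", elapsed_ticks);
    return 0;
}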
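
For reference, kalloc_init() in the patches above sizes its caches as
consecutive powers of two, starting at 1 << KALLOC_FIRST_SHIFT (32 bytes)
for KALLOC_NR_CACHES (13) doublings, i.e. 32 B through 128 KiB; larger
requests fall through to the page-level backend. A hypothetical sketch of
that size-class selection, not the kernel's actual kalloc() code:

#include <stddef.h>
#include <stdio.h>

#define KALLOC_FIRST_SHIFT 5   /* smallest cache: 1 << 5 = 32 bytes */
#define KALLOC_NR_CACHES   13  /* 32 B, 64 B, ..., 128 KiB */

/* Index of the first power-of-two cache large enough for `size`,
   or -1 if the request must go to the page allocator instead. */
static int kalloc_cache_index(size_t size)
{
    size_t cache_size = (size_t)1 << KALLOC_FIRST_SHIFT;
    int i;

    for (i = 0; i < KALLOC_NR_CACHES; i++, cache_size <<= 1)
        if (size <= cache_size)
            return i;
    return -1; /* larger than 128 KiB */
}

int main(void)
{
    printf("48 bytes   -> cache %d\n", kalloc_cache_index(48));     /* 1 (64 B) */
    printf("4096 bytes -> cache %d\n", kalloc_cache_index(4096));   /* 7 */
    printf("1 MiB      -> cache %d\n", kalloc_cache_index(1 << 20));/* -1 */
    return 0;
}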