```
author     Richard Braun <rbraun@sceen.net>    2016-12-09 01:41:06 +0100
committer  Richard Braun <rbraun@sceen.net>    2016-12-09 01:41:06 +0100
commit     39c13b3b84b34e0938220126c8f147d2b0b6ac89 (patch)
tree       92accef33f04f49a01765e00ec026b092ae0c8ca /kern
parent     84c92cd2be8bc4aea6c14a186f79c2277f0fd4aa (diff)
```
Force brackets around one-line conditional statements
This change was done using astyle, with a bit of manual editing here and
there.
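The transformation is mechanical; every hunk below has the shape shown in this sketch (the before/after fragment is lifted from the kern/bitmap.c hunk; the exact astyle invocation is not recorded in the commit, though astyle's brace-insertion option, `--add-brackets` in the 2.x series, produces this style):

```c
/* Before: a one-line conditional body carries no brackets. */
if (rv != 0)
    return rv;

/* After: brackets are forced around the one-line body. */
if (rv != 0) {
    return rv;
}
```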
Diffstat (limited to 'kern')
```
-rw-r--r--  kern/bitmap.c       28
-rw-r--r--  kern/bitmap.h       24
-rw-r--r--  kern/condition.c     9
-rw-r--r--  kern/cpumap.c        9
-rw-r--r--  kern/error.c         3
-rw-r--r--  kern/evcnt.c         3
-rw-r--r--  kern/kmem.c        144
-rw-r--r--  kern/list.h          3
-rw-r--r--  kern/llsync.c       26
-rw-r--r--  kern/llsync.h        6
-rw-r--r--  kern/log2.h          3
-rw-r--r--  kern/mutex.h         9
-rw-r--r--  kern/panic.c         6
-rw-r--r--  kern/percpu.c        9
-rw-r--r--  kern/printk.c        3
-rw-r--r--  kern/rbtree.c       90
-rw-r--r--  kern/spinlock.h      3
-rw-r--r--  kern/spinlock_i.h    3
-rw-r--r--  kern/sprintf.c      96
-rw-r--r--  kern/sref.c         60
-rw-r--r--  kern/string.c       24
-rw-r--r--  kern/task.c          9
-rw-r--r--  kern/thread.c      220
-rw-r--r--  kern/thread.h        3
-rw-r--r--  kern/work.c         49
-rw-r--r--  kern/work.h         11
-rw-r--r--  kern/xcall.c         6
```
27 files changed, 552 insertions, 307 deletions
diff --git a/kern/bitmap.c b/kern/bitmap.c index af19805f..b3117dfb 100644 --- a/kern/bitmap.c +++ b/kern/bitmap.c @@ -31,8 +31,9 @@ bitmap_cmp(const unsigned long *a, const unsigned long *b, int nr_bits) if (n != 0) { rv = memcmp(a, b, n * sizeof(unsigned long)); - if (rv != 0) + if (rv != 0) { return rv; + } nr_bits -= n * LONG_BIT; } @@ -46,19 +47,21 @@ bitmap_cmp(const unsigned long *a, const unsigned long *b, int nr_bits) last_b &= mask; } - if (last_a == last_b) + if (last_a == last_b) { return 0; - else if (last_a < last_b) + } else if (last_a < last_b) { return -1; - else + } else { return 1; + } } static inline unsigned long bitmap_find_next_compute_complement(unsigned long word, int nr_bits) { - if (nr_bits < LONG_BIT) + if (nr_bits < LONG_BIT) { word |= (((unsigned long)-1) << nr_bits); + } return ~word; } @@ -80,27 +83,32 @@ bitmap_find_next_bit(const unsigned long *bm, int nr_bits, int bit, word = *bm; - if (complement) + if (complement) { word = bitmap_find_next_compute_complement(word, nr_bits); + } - if (bit < LONG_BIT) + if (bit < LONG_BIT) { word &= ~(bitmap_mask(bit) - 1); + } for (;;) { bit = __builtin_ffsl(word); - if (bit != 0) + if (bit != 0) { return ((bm - start) * LONG_BIT) + bit - 1; + } bm++; - if (bm >= end) + if (bm >= end) { return -1; + } nr_bits -= LONG_BIT; word = *bm; - if (complement) + if (complement) { word = bitmap_find_next_compute_complement(word, nr_bits); + } } } diff --git a/kern/bitmap.h b/kern/bitmap.h index 8c5980a1..6489b486 100644 --- a/kern/bitmap.h +++ b/kern/bitmap.h @@ -63,8 +63,9 @@ bitmap_copy(unsigned long *dest, const unsigned long *src, int nr_bits) static inline void bitmap_set(unsigned long *bm, int bit) { - if (bit >= LONG_BIT) + if (bit >= LONG_BIT) { bitmap_lookup(bm, bit); + } *bm |= bitmap_mask(bit); } @@ -72,8 +73,9 @@ bitmap_set(unsigned long *bm, int bit) static inline void bitmap_set_atomic(unsigned long *bm, int bit) { - if (bit >= LONG_BIT) + if (bit >= LONG_BIT) { bitmap_lookup(bm, bit); + } atomic_or_ulong(bm, bitmap_mask(bit)); } @@ -81,8 +83,9 @@ bitmap_set_atomic(unsigned long *bm, int bit) static inline void bitmap_clear(unsigned long *bm, int bit) { - if (bit >= LONG_BIT) + if (bit >= LONG_BIT) { bitmap_lookup(bm, bit); + } *bm &= ~bitmap_mask(bit); } @@ -90,8 +93,9 @@ bitmap_clear(unsigned long *bm, int bit) static inline void bitmap_clear_atomic(unsigned long *bm, int bit) { - if (bit >= LONG_BIT) + if (bit >= LONG_BIT) { bitmap_lookup(bm, bit); + } atomic_and_ulong(bm, ~bitmap_mask(bit)); } @@ -99,8 +103,9 @@ bitmap_clear_atomic(unsigned long *bm, int bit) static inline int bitmap_test(const unsigned long *bm, int bit) { - if (bit >= LONG_BIT) + if (bit >= LONG_BIT) { bitmap_lookup(bm, bit); + } return ((*bm & bitmap_mask(bit)) != 0); } @@ -112,8 +117,9 @@ bitmap_and(unsigned long *a, const unsigned long *b, int nr_bits) n = BITMAP_LONGS(nr_bits); - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { a[i] &= b[i]; + } } static inline void @@ -123,8 +129,9 @@ bitmap_or(unsigned long *a, const unsigned long *b, int nr_bits) n = BITMAP_LONGS(nr_bits); - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { a[i] |= b[i]; + } } static inline void @@ -134,8 +141,9 @@ bitmap_xor(unsigned long *a, const unsigned long *b, int nr_bits) n = BITMAP_LONGS(nr_bits); - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { a[i] ^= b[i]; + } } static inline int diff --git a/kern/condition.c b/kern/condition.c index 0012aa18..2c233df0 100644 --- a/kern/condition.c +++ b/kern/condition.c @@ -44,8 +44,9 @@ condition_wait(struct 
condition *condition, struct mutex *mutex) assert((condition->mutex == NULL) || (condition->mutex == mutex)); - if (condition->mutex == NULL) + if (condition->mutex == NULL) { condition->mutex = mutex; + } list_insert_tail(&condition->waiters, &waiter.node); @@ -53,8 +54,9 @@ condition_wait(struct condition *condition, struct mutex *mutex) state = mutex_release(mutex); - if (state == MUTEX_CONTENDED) + if (state == MUTEX_CONTENDED) { mutex_signal(mutex); + } spinlock_unlock(&condition->lock); @@ -82,8 +84,9 @@ condition_signal(struct condition *condition) waiter = list_first_entry(&condition->waiters, struct mutex_waiter, node); list_remove(&waiter->node); - if (list_empty(&condition->waiters)) + if (list_empty(&condition->waiters)) { condition->mutex = NULL; + } spinlock_unlock(&condition->lock); diff --git a/kern/cpumap.c b/kern/cpumap.c index ccec0d22..2cc3511c 100644 --- a/kern/cpumap.c +++ b/kern/cpumap.c @@ -36,8 +36,9 @@ cpumap_setup(void) cpumap_zero(&cpumap_active_cpus); nr_cpus = cpu_count(); - for (i = 0; i < nr_cpus; i++) + for (i = 0; i < nr_cpus; i++) { cpumap_set(&cpumap_active_cpus, i); + } } const struct cpumap * @@ -53,8 +54,9 @@ cpumap_create(struct cpumap **cpumapp) cpumap = kmem_cache_alloc(&cpumap_cache); - if (cpumap == NULL) + if (cpumap == NULL) { return ERROR_NOMEM; + } *cpumapp = cpumap; return 0; @@ -73,8 +75,9 @@ cpumap_check(const struct cpumap *cpumap) index = bitmap_find_first(cpumap->cpus, cpu_count()); - if (index == -1) + if (index == -1) { return ERROR_INVAL; + } return 0; } diff --git a/kern/error.c b/kern/error.c index a86e913c..043cdb98 100644 --- a/kern/error.c +++ b/kern/error.c @@ -42,8 +42,9 @@ error_str(int error) void error_check(int error, const char *prefix) { - if (!error) + if (!error) { return; + } panic("%s%s%s", (prefix == NULL) ? 
"" : prefix, diff --git a/kern/evcnt.c b/kern/evcnt.c index a4eab794..803516d0 100644 --- a/kern/evcnt.c +++ b/kern/evcnt.c @@ -63,8 +63,9 @@ evcnt_info(const char *pattern) length = strlen(evcnt->name); if ((length < pattern_length) - || (memcmp(evcnt->name, pattern, pattern_length) != 0)) + || (memcmp(evcnt->name, pattern, pattern_length) != 0)) { continue; + } } printk("evcnt: %-30s %17llu\n", evcnt->name, evcnt->count); diff --git a/kern/kmem.c b/kern/kmem.c index 3f1d0d1a..59bb99d4 100644 --- a/kern/kmem.c +++ b/kern/kmem.c @@ -165,8 +165,9 @@ kmem_buf_verify_bytes(void *buf, void *pattern, size_t size) end = buf + size; for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++) - if (*ptr != *pattern_ptr) + if (*ptr != *pattern_ptr) { return ptr; + } return NULL; } @@ -181,8 +182,9 @@ kmem_buf_fill(void *buf, uint64_t pattern, size_t size) end = buf + size; - for (ptr = buf; ptr < end; ptr++) + for (ptr = buf; ptr < end; ptr++) { *ptr = pattern; + } } static void * @@ -196,8 +198,9 @@ kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new, size_t size) end = buf + size; for (ptr = buf; ptr < end; ptr++) { - if (*ptr != old) + if (*ptr != old) { return kmem_buf_verify_bytes(ptr, &old, sizeof(old)); + } *ptr = new; } @@ -261,8 +264,9 @@ kmem_slab_create(struct kmem_cache *cache, size_t color) page = vm_page_alloc(cache->slab_order, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KMEM); - if (page == NULL) + if (page == NULL) { return NULL; + } slab_buf = vm_page_direct_ptr(page); @@ -291,8 +295,9 @@ kmem_slab_create(struct kmem_cache *cache, size_t color) bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size); } - if (cache->flags & KMEM_CF_VERIFY) + if (cache->flags & KMEM_CF_VERIFY) { kmem_slab_create_verify(slab, cache); + } return slab; } @@ -383,11 +388,13 @@ kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache) for (i = 0; i < cpu_pool->transfer_size; i++) { buf = kmem_cache_alloc_from_slab(cache); - if (buf == NULL) + if (buf == NULL) { break; + } - if (ctor != NULL) + if (ctor != NULL) { ctor(buf); + } kmem_cpu_pool_push(cpu_pool, buf); } @@ -466,8 +473,9 @@ kmem_cache_compute_sizes(struct kmem_cache *cache, int flags) buf_size = cache->buf_size; - if (buf_size < KMEM_BUF_SIZE_THRESHOLD) + if (buf_size < KMEM_BUF_SIZE_THRESHOLD) { flags |= KMEM_CACHE_NOOFFSLAB; + } i = 0; waste_min = (size_t)-1; @@ -479,18 +487,20 @@ kmem_cache_compute_sizes(struct kmem_cache *cache, int flags) slab_size = PAGE_SIZE << slab_order; free_slab_size = slab_size; - if (flags & KMEM_CACHE_NOOFFSLAB) + if (flags & KMEM_CACHE_NOOFFSLAB) { free_slab_size -= sizeof(struct kmem_slab); + } buffers = free_slab_size / buf_size; waste = free_slab_size % buf_size; - if (buffers > i) + if (buffers > i) { i = buffers; + } - if (flags & KMEM_CACHE_NOOFFSLAB) + if (flags & KMEM_CACHE_NOOFFSLAB) { embed = 1; - else if (sizeof(struct kmem_slab) <= waste) { + } else if (sizeof(struct kmem_slab) <= waste) { embed = 1; waste -= sizeof(struct kmem_slab); } else { @@ -515,12 +525,14 @@ kmem_cache_compute_sizes(struct kmem_cache *cache, int flags) cache->bufs_per_slab = slab_size / buf_size; cache->color_max = slab_size % buf_size; - if (cache->color_max >= PAGE_SIZE) + if (cache->color_max >= PAGE_SIZE) { cache->color_max = PAGE_SIZE - 1; + } if (optimal_embed) { - if (cache->slab_size == PAGE_SIZE) + if (cache->slab_size == PAGE_SIZE) { cache->flags |= KMEM_CF_DIRECT; + } } else { cache->flags |= KMEM_CF_SLAB_EXTERNAL; } @@ -539,11 +551,13 @@ kmem_cache_init(struct kmem_cache *cache, const char 
*name, size_t obj_size, cache->flags = 0; #endif /* KMEM_CF_VERIFY */ - if (flags & KMEM_CACHE_VERIFY) + if (flags & KMEM_CACHE_VERIFY) { cache->flags |= KMEM_CF_VERIFY; + } - if (align < KMEM_ALIGN_MIN) + if (align < KMEM_ALIGN_MIN) { align = KMEM_ALIGN_MIN; + } assert(obj_size > 0); assert(ISP2(align)); @@ -586,8 +600,9 @@ kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size, cache->cpu_pool_type = cpu_pool_type; - for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) + for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) { kmem_cpu_pool_init(&cache->cpu_pools[i], cache); + } mutex_lock(&kmem_cache_list_lock); list_insert_tail(&kmem_cache_list, &cache->node); @@ -617,8 +632,9 @@ kmem_cache_grow(struct kmem_cache *cache) color = cache->color; cache->color += cache->align; - if (cache->color > cache->color_max) + if (cache->color > cache->color_max) { cache->color = 0; + } mutex_unlock(&cache->lock); @@ -632,8 +648,9 @@ kmem_cache_grow(struct kmem_cache *cache) cache->nr_slabs++; cache->nr_free_slabs++; - if (kmem_slab_lookup_needed(cache->flags)) + if (kmem_slab_lookup_needed(cache->flags)) { kmem_slab_vmref(slab, cache->slab_size); + } } /* @@ -658,12 +675,13 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache) struct kmem_slab *slab; union kmem_bufctl *bufctl; - if (!list_empty(&cache->partial_slabs)) + if (!list_empty(&cache->partial_slabs)) { slab = list_first_entry(&cache->partial_slabs, struct kmem_slab, node); - else if (!list_empty(&cache->free_slabs)) + } else if (!list_empty(&cache->free_slabs)) { slab = list_first_entry(&cache->free_slabs, struct kmem_slab, node); - else + } else { return NULL; + } bufctl = slab->first_free; assert(bufctl != NULL); @@ -675,8 +693,9 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache) /* The slab has become complete */ list_remove(&slab->node); - if (slab->nr_refs == 1) + if (slab->nr_refs == 1) { cache->nr_free_slabs--; + } } else if (slab->nr_refs == 1) { /* * The slab has become partial. 
Insert the new slab at the end of @@ -727,8 +746,9 @@ kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf) if (slab->nr_refs == 0) { /* The slab has become free */ - if (cache->bufs_per_slab != 1) + if (cache->bufs_per_slab != 1) { list_remove(&slab->node); + } list_insert_head(&cache->free_slabs, &slab->node); cache->nr_free_slabs++; @@ -747,14 +767,16 @@ kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, int construct) buftag = kmem_buf_to_buftag(buf, cache); - if (buftag->state != KMEM_BUFTAG_FREE) + if (buftag->state != KMEM_BUFTAG_FREE) { kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag); + } addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN, cache->bufctl_dist); - if (addr != NULL) + if (addr != NULL) { kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr); + } addr = buf + cache->obj_size; memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad); @@ -763,8 +785,9 @@ kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, int construct) bufctl->redzone = KMEM_REDZONE_WORD; buftag->state = KMEM_BUFTAG_ALLOC; - if (construct && (cache->ctor != NULL)) + if (construct && (cache->ctor != NULL)) { cache->ctor(buf); + } } void * @@ -786,8 +809,9 @@ fast_alloc: mutex_unlock(&cpu_pool->lock); thread_unpin(); - if (verify) + if (verify) { kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT); + } return buf; } @@ -801,8 +825,9 @@ fast_alloc: filled = kmem_cache_grow(cache); - if (!filled) + if (!filled) { return NULL; + } thread_pin(); cpu_pool = kmem_cpu_pool_get(cache); @@ -823,17 +848,20 @@ slab_alloc: if (buf == NULL) { filled = kmem_cache_grow(cache); - if (!filled) + if (!filled) { return NULL; + } goto slab_alloc; } - if (cache->flags & KMEM_CF_VERIFY) + if (cache->flags & KMEM_CF_VERIFY) { kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT); + } - if (cache->ctor != NULL) + if (cache->ctor != NULL) { cache->ctor(buf); + } return buf; } @@ -850,22 +878,26 @@ kmem_cache_free_verify(struct kmem_cache *cache, void *buf) page = vm_page_lookup(vm_page_direct_pa((unsigned long)buf)); - if (page == NULL) + if (page == NULL) { kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + } slab = page->slab_priv; - if (slab == NULL) + if (slab == NULL) { kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + } slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE); - if ((unsigned long)buf >= slabend) + if ((unsigned long)buf >= slabend) { kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + } if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size) - != 0) + != 0) { kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL); + } /* * As the buffer address is valid, accessing its buftag is safe. 
@@ -873,18 +905,20 @@ kmem_cache_free_verify(struct kmem_cache *cache, void *buf) buftag = kmem_buf_to_buftag(buf, cache); if (buftag->state != KMEM_BUFTAG_ALLOC) { - if (buftag->state == KMEM_BUFTAG_FREE) + if (buftag->state == KMEM_BUFTAG_FREE) { kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE, NULL); - else + } else { kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag); + } } redzone_byte = buf + cache->obj_size; bufctl = kmem_buf_to_bufctl(buf, cache); while (redzone_byte < (unsigned char *)bufctl) { - if (*redzone_byte != KMEM_REDZONE_BYTE) + if (*redzone_byte != KMEM_REDZONE_BYTE) { kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte); + } redzone_byte++; } @@ -977,8 +1011,9 @@ kmem_cache_info(struct kmem_cache *cache) if (cache == NULL) { mutex_lock(&kmem_cache_list_lock); - list_for_each_entry(&kmem_cache_list, cache, node) + list_for_each_entry(&kmem_cache_list, cache, node) { kmem_cache_info(cache); + } mutex_unlock(&kmem_cache_list_lock); @@ -986,9 +1021,9 @@ kmem_cache_info(struct kmem_cache *cache) } snprintf(flags_str, sizeof(flags_str), "%s%s%s", - (cache->flags & KMEM_CF_DIRECT) ? " DIRECT" : "", - (cache->flags & KMEM_CF_SLAB_EXTERNAL) ? " SLAB_EXTERNAL" : "", - (cache->flags & KMEM_CF_VERIFY) ? " VERIFY" : ""); + (cache->flags & KMEM_CF_DIRECT) ? " DIRECT" : "", + (cache->flags & KMEM_CF_SLAB_EXTERNAL) ? " SLAB_EXTERNAL" : "", + (cache->flags & KMEM_CF_VERIFY) ? " VERIFY" : ""); mutex_lock(&cache->lock); @@ -1079,8 +1114,9 @@ kmem_alloc(size_t size) size_t index; void *buf; - if (size == 0) + if (size == 0) { return NULL; + } index = kmem_get_index(size); @@ -1090,21 +1126,23 @@ kmem_alloc(size_t size) cache = &kmem_caches[index]; buf = kmem_cache_alloc(cache); - if ((buf != NULL) && (cache->flags & KMEM_CF_VERIFY)) + if ((buf != NULL) && (cache->flags & KMEM_CF_VERIFY)) { kmem_alloc_verify(cache, buf, size); + } } else { struct vm_page *page; page = vm_page_alloc(vm_page_order(size), VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL); - if (page == NULL) + if (page == NULL) { return NULL; + } buf = vm_page_direct_ptr(page); } - return buf; + return buf; } void * @@ -1114,8 +1152,9 @@ kmem_zalloc(size_t size) ptr = kmem_alloc(size); - if (ptr == NULL) + if (ptr == NULL) { return NULL; + } memset(ptr, 0, size); return ptr; @@ -1132,8 +1171,9 @@ kmem_free_verify(struct kmem_cache *cache, void *buf, size_t size) redzone_end = buf + cache->obj_size; while (redzone_byte < redzone_end) { - if (*redzone_byte != KMEM_REDZONE_BYTE) + if (*redzone_byte != KMEM_REDZONE_BYTE) { kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte); + } redzone_byte++; } @@ -1144,8 +1184,9 @@ kmem_free(void *ptr, size_t size) { size_t index; - if ((ptr == NULL) || (size == 0)) + if ((ptr == NULL) || (size == 0)) { return; + } index = kmem_get_index(size); @@ -1154,8 +1195,9 @@ kmem_free(void *ptr, size_t size) cache = &kmem_caches[index]; - if (cache->flags & KMEM_CF_VERIFY) + if (cache->flags & KMEM_CF_VERIFY) { kmem_free_verify(cache, ptr, size); + } kmem_cache_free(cache, ptr); } else { diff --git a/kern/list.h b/kern/list.h index e64f5fd9..c85e8c15 100644 --- a/kern/list.h +++ b/kern/list.h @@ -186,8 +186,9 @@ list_concat(struct list *list1, const struct list *list2) { struct list *last1, *first2, *last2; - if (list_empty(list2)) + if (list_empty(list2)) { return; + } last1 = list1->prev; first2 = list2->next; diff --git a/kern/llsync.c b/kern/llsync.c index 489e1539..db356ff1 100644 --- a/kern/llsync.c +++ b/kern/llsync.c @@ -126,8 +126,9 @@ llsync_process_global_checkpoint(void) 
work_queue_transfer(&llsync_data.queue1, &llsync_data.queue0); work_queue_init(&llsync_data.queue0); - if (work_queue_nr_works(&queue) != 0) + if (work_queue_nr_works(&queue) != 0) { work_queue_schedule(&queue, 0); + } llsync_data.gcid.value++; evcnt_inc(&llsync_data.ev_global_checkpoint); @@ -136,8 +137,9 @@ llsync_process_global_checkpoint(void) static void llsync_flush_works(struct llsync_cpu_data *cpu_data) { - if (work_queue_nr_works(&cpu_data->queue0) == 0) + if (work_queue_nr_works(&cpu_data->queue0) == 0) { return; + } work_queue_concat(&llsync_data.queue0, &cpu_data->queue0); work_queue_init(&cpu_data->queue0); @@ -150,14 +152,16 @@ llsync_commit_checkpoint(unsigned int cpu) pending = cpumap_test(&llsync_data.pending_checkpoints, cpu); - if (!pending) + if (!pending) { return; + } cpumap_clear(&llsync_data.pending_checkpoints, cpu); llsync_data.nr_pending_checkpoints--; - if (llsync_data.nr_pending_checkpoints == 0) + if (llsync_data.nr_pending_checkpoints == 0) { llsync_process_global_checkpoint(); + } } void @@ -184,8 +188,9 @@ llsync_register(void) assert(!cpumap_test(&llsync_data.pending_checkpoints, cpu)); if ((llsync_data.nr_registered_cpus == 1) - && (llsync_data.nr_pending_checkpoints == 0)) + && (llsync_data.nr_pending_checkpoints == 0)) { llsync_process_global_checkpoint(); + } spinlock_unlock_intr_restore(&llsync_data.lock, flags); } @@ -252,12 +257,12 @@ llsync_report_periodic_event(void) * Check whether this periodic event occurred during a read-side critical * section, and if not, trigger a checkpoint. */ - if (cpu_data->gcid == gcid) + if (cpu_data->gcid == gcid) { llsync_commit_checkpoint(cpu_id()); - else { - if (thread_llsync_in_read_cs()) + } else { + if (thread_llsync_in_read_cs()) { evcnt_inc(&llsync_data.ev_failed_periodic_checkin); - else { + } else { cpu_data->gcid = gcid; evcnt_inc(&llsync_data.ev_periodic_checkin); llsync_commit_checkpoint(cpu_id()); @@ -308,8 +313,9 @@ llsync_wait(void) mutex_lock(&waiter.lock); - while (!waiter.done) + while (!waiter.done) { condition_wait(&waiter.cond, &waiter.lock); + } mutex_unlock(&waiter.lock); } diff --git a/kern/llsync.h b/kern/llsync.h index 0d7438bb..57ad5589 100644 --- a/kern/llsync.h +++ b/kern/llsync.h @@ -107,8 +107,9 @@ llsync_read_enter(void) in_read_cs = thread_llsync_in_read_cs(); thread_llsync_read_inc(); - if (!in_read_cs) + if (!in_read_cs) { thread_preempt_disable(); + } } static inline void @@ -116,8 +117,9 @@ llsync_read_exit(void) { thread_llsync_read_dec(); - if (!thread_llsync_in_read_cs()) + if (!thread_llsync_in_read_cs()) { thread_preempt_enable(); + } } /* diff --git a/kern/log2.h b/kern/log2.h index c9cc5be1..0a3768a7 100644 --- a/kern/log2.h +++ b/kern/log2.h @@ -36,8 +36,9 @@ iorder2(unsigned long size) { assert(size != 0); - if (size == 1) + if (size == 1) { return 0; + } return ilog2(size - 1) + 1; } diff --git a/kern/mutex.h b/kern/mutex.h index d3fe74b5..a36ed17e 100644 --- a/kern/mutex.h +++ b/kern/mutex.h @@ -51,8 +51,9 @@ mutex_trylock(struct mutex *mutex) state = mutex_tryacquire(mutex); - if (state == MUTEX_UNLOCKED) + if (state == MUTEX_UNLOCKED) { return 0; + } return 1; } @@ -64,8 +65,9 @@ mutex_lock(struct mutex *mutex) state = mutex_tryacquire(mutex); - if (state == MUTEX_UNLOCKED) + if (state == MUTEX_UNLOCKED) { return; + } assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED)); @@ -79,8 +81,9 @@ mutex_unlock(struct mutex *mutex) state = mutex_release(mutex); - if (state == MUTEX_LOCKED) + if (state == MUTEX_LOCKED) { return; + } assert(state == MUTEX_CONTENDED); 
diff --git a/kern/panic.c b/kern/panic.c index 252c4b68..e0bf30cc 100644 --- a/kern/panic.c +++ b/kern/panic.c @@ -33,9 +33,11 @@ panic(const char *format, ...) already_done = atomic_swap_uint(&panic_done, 1); - if (already_done) - for (;;) + if (already_done) { + for (;;) { cpu_idle(); + } + } cpu_intr_disable(); cpu_halt_broadcast(); diff --git a/kern/percpu.c b/kern/percpu.c index 71f67b1a..5b9690cc 100644 --- a/kern/percpu.c +++ b/kern/percpu.c @@ -52,14 +52,16 @@ percpu_setup(void) percpu_area_size >> 10); assert(vm_page_aligned(percpu_area_size)); - if (percpu_area_size == 0) + if (percpu_area_size == 0) { return; + } order = vm_page_order(percpu_area_size); page = vm_page_alloc(order, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL); - if (page == NULL) + if (page == NULL) { panic("percpu: unable to allocate memory for percpu area content"); + } percpu_area_content = vm_page_direct_ptr(page); memcpy(percpu_area_content, &_percpu, percpu_area_size); @@ -86,8 +88,9 @@ percpu_add(unsigned int cpu) return ERROR_INVAL; } - if (percpu_area_size == 0) + if (percpu_area_size == 0) { goto out; + } order = vm_page_order(percpu_area_size); page = vm_page_alloc(order, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL); diff --git a/kern/printk.c b/kern/printk.c index df3ba715..dd5cc87f 100644 --- a/kern/printk.c +++ b/kern/printk.c @@ -57,8 +57,9 @@ vprintk(const char *format, va_list ap) length = vsnprintf(printk_buffer, sizeof(printk_buffer), format, ap); - for (ptr = printk_buffer; *ptr != '\0'; ptr++) + for (ptr = printk_buffer; *ptr != '\0'; ptr++) { console_write_byte(*ptr); + } spinlock_unlock_intr_restore(&printk_lock, flags); diff --git a/kern/rbtree.c b/kern/rbtree.c index 569d3fac..49cb097f 100644 --- a/kern/rbtree.c +++ b/kern/rbtree.c @@ -34,8 +34,9 @@ rbtree_node_index(const struct rbtree_node *node, assert(parent != NULL); assert((node == NULL) || (rbtree_node_parent(node) == parent)); - if (parent->children[RBTREE_LEFT] == node) + if (parent->children[RBTREE_LEFT] == node) { return RBTREE_LEFT; + } assert(parent->children[RBTREE_RIGHT] == node); @@ -126,8 +127,9 @@ rbtree_node_find_deepest(struct rbtree_node *node) if (node == NULL) { node = parent->children[RBTREE_RIGHT]; - if (node == NULL) + if (node == NULL) { return parent; + } } } } @@ -151,16 +153,18 @@ rbtree_rotate(struct rbtree *tree, struct rbtree_node *node, int direction) node->children[right] = rnode->children[left]; - if (rnode->children[left] != NULL) + if (rnode->children[left] != NULL) { rbtree_node_set_parent(rnode->children[left], node); + } rnode->children[left] = node; rbtree_node_set_parent(rnode, parent); - if (unlikely(parent == NULL)) + if (unlikely(parent == NULL)) { tree->root = rnode; - else + } else { parent->children[rbtree_node_index(node, parent)] = rnode; + } rbtree_node_set_parent(node, rnode); } @@ -179,10 +183,11 @@ rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent, node->children[RBTREE_LEFT] = NULL; node->children[RBTREE_RIGHT] = NULL; - if (unlikely(parent == NULL)) + if (unlikely(parent == NULL)) { tree->root = node; - else + } else { parent->children[index] = node; + } for (;;) { if (parent == NULL) { @@ -190,8 +195,9 @@ rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent, break; } - if (rbtree_node_is_black(parent)) + if (rbtree_node_is_black(parent)) { break; + } grand_parent = rbtree_node_parent(parent); assert(grand_parent != NULL); @@ -242,11 +248,11 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node) struct rbtree_node *child, *parent, *brother; int 
color, left, right; - if (node->children[RBTREE_LEFT] == NULL) + if (node->children[RBTREE_LEFT] == NULL) { child = node->children[RBTREE_RIGHT]; - else if (node->children[RBTREE_RIGHT] == NULL) + } else if (node->children[RBTREE_RIGHT] == NULL) { child = node->children[RBTREE_LEFT]; - else { + } else { struct rbtree_node *successor; /* @@ -255,17 +261,19 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node) successor = node->children[RBTREE_RIGHT]; - while (successor->children[RBTREE_LEFT] != NULL) + while (successor->children[RBTREE_LEFT] != NULL) { successor = successor->children[RBTREE_LEFT]; + } color = rbtree_node_color(successor); child = successor->children[RBTREE_RIGHT]; parent = rbtree_node_parent(node); - if (unlikely(parent == NULL)) + if (unlikely(parent == NULL)) { tree->root = successor; - else + } else { parent->children[rbtree_node_index(node, parent)] = successor; + } parent = rbtree_node_parent(successor); @@ -276,16 +284,17 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node) successor->children[RBTREE_LEFT] = node->children[RBTREE_LEFT]; rbtree_node_set_parent(successor->children[RBTREE_LEFT], successor); - if (node == parent) + if (node == parent) { parent = successor; - else { + } else { successor->children[RBTREE_RIGHT] = node->children[RBTREE_RIGHT]; rbtree_node_set_parent(successor->children[RBTREE_RIGHT], successor); parent->children[RBTREE_LEFT] = child; - if (child != NULL) + if (child != NULL) { rbtree_node_set_parent(child, parent); + } } goto update_color; @@ -298,21 +307,24 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node) color = rbtree_node_color(node); parent = rbtree_node_parent(node); - if (child != NULL) + if (child != NULL) { rbtree_node_set_parent(child, parent); + } - if (unlikely(parent == NULL)) + if (unlikely(parent == NULL)) { tree->root = child; - else + } else { parent->children[rbtree_node_index(node, parent)] = child; + } /* * The node has been removed, update the colors. The child pointer can * be null, in which case it is considered a black leaf. 
*/ update_color: - if (color == RBTREE_COLOR_RED) + if (color == RBTREE_COLOR_RED) { return; + } for (;;) { if ((child != NULL) && rbtree_node_is_red(child)) { @@ -320,8 +332,9 @@ update_color: break; } - if (parent == NULL) + if (parent == NULL) { break; + } left = rbtree_node_index(child, parent); right = 1 - left; @@ -383,13 +396,15 @@ rbtree_nearest(struct rbtree_node *parent, int index, int direction) { assert(rbtree_check_index(direction)); - if (parent == NULL) + if (parent == NULL) { return NULL; + } assert(rbtree_check_index(index)); - if (index != direction) + if (index != direction) { return parent; + } return rbtree_walk(parent, direction); } @@ -403,8 +418,9 @@ rbtree_firstlast(const struct rbtree *tree, int direction) prev = NULL; - for (cur = tree->root; cur != NULL; cur = cur->children[direction]) + for (cur = tree->root; cur != NULL; cur = cur->children[direction]) { prev = cur; + } return prev; } @@ -419,14 +435,16 @@ rbtree_walk(struct rbtree_node *node, int direction) left = direction; right = 1 - left; - if (node == NULL) + if (node == NULL) { return NULL; + } if (node->children[left] != NULL) { node = node->children[left]; - while (node->children[right] != NULL) + while (node->children[right] != NULL) { node = node->children[right]; + } } else { struct rbtree_node *parent; int index; @@ -434,14 +452,16 @@ rbtree_walk(struct rbtree_node *node, int direction) for (;;) { parent = rbtree_node_parent(node); - if (parent == NULL) + if (parent == NULL) { return NULL; + } index = rbtree_node_index(node, parent); node = parent; - if (index == right) + if (index == right) { break; + } } } @@ -455,8 +475,9 @@ rbtree_postwalk_deepest(const struct rbtree *tree) node = tree->root; - if (node == NULL) + if (node == NULL) { return NULL; + } return rbtree_node_find_deepest(node); } @@ -467,23 +488,26 @@ rbtree_postwalk_unlink(struct rbtree_node *node) struct rbtree_node *parent; int index; - if (node == NULL) + if (node == NULL) { return NULL; + } assert(node->children[RBTREE_LEFT] == NULL); assert(node->children[RBTREE_RIGHT] == NULL); parent = rbtree_node_parent(node); - if (parent == NULL) + if (parent == NULL) { return NULL; + } index = rbtree_node_index(node, parent); parent->children[index] = NULL; node = parent->children[RBTREE_RIGHT]; - if (node == NULL) + if (node == NULL) { return parent; + } return rbtree_node_find_deepest(node); } diff --git a/kern/spinlock.h b/kern/spinlock.h index 1cc9b08f..f63c4e0b 100644 --- a/kern/spinlock.h +++ b/kern/spinlock.h @@ -51,8 +51,9 @@ spinlock_trylock(struct spinlock *lock) thread_preempt_disable(); busy = spinlock_tryacquire(lock); - if (busy) + if (busy) { thread_preempt_enable(); + } return busy; } diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h index c07f6615..ed851099 100644 --- a/kern/spinlock_i.h +++ b/kern/spinlock_i.h @@ -35,8 +35,9 @@ spinlock_tryacquire(struct spinlock *lock) static inline void spinlock_acquire(struct spinlock *lock) { - while (spinlock_tryacquire(lock)) + while (spinlock_tryacquire(lock)) { cpu_pause(); + } } static inline void diff --git a/kern/sprintf.c b/kern/sprintf.c index 13f2671e..7117dbed 100644 --- a/kern/sprintf.c +++ b/kern/sprintf.c @@ -75,8 +75,9 @@ static const char sprintf_digits[] = "0123456789ABCDEF"; static inline char * sprintf_putchar(char *str, char *end, char c) { - if (str < end) + if (str < end) { *str = c; + } str++; @@ -131,12 +132,13 @@ vsnprintf(char *str, size_t size, const char *format, va_list ap) start = str; - if (size == 0) + if (size == 0) { end = NULL; - else if (size == 
SPRINTF_NOLIMIT) + } else if (size == SPRINTF_NOLIMIT) { end = (char *)-1; - else + } else { end = start + size - 1; + } while ((c = *format) != '\0') { if (c != '%') { @@ -217,8 +219,9 @@ vsnprintf(char *str, size_t size, const char *format, va_list ap) } else if (c == '*') { precision = va_arg(ap, int); - if (precision < 0) + if (precision < 0) { precision = 0; + } format++; c = *format; @@ -309,51 +312,58 @@ integer: case SPRINTF_SPECIFIER_INT: switch (modifier) { case SPRINTF_MODIFIER_CHAR: - if (flags & SPRINTF_FORMAT_CONV_SIGNED) + if (flags & SPRINTF_FORMAT_CONV_SIGNED) { n = (signed char)va_arg(ap, int); - else + } else { n = (unsigned char)va_arg(ap, int); + } break; case SPRINTF_MODIFIER_SHORT: - if (flags & SPRINTF_FORMAT_CONV_SIGNED) + if (flags & SPRINTF_FORMAT_CONV_SIGNED) { n = (short)va_arg(ap, int); - else + } else { n = (unsigned short)va_arg(ap, int); + } break; case SPRINTF_MODIFIER_LONG: - if (flags & SPRINTF_FORMAT_CONV_SIGNED) + if (flags & SPRINTF_FORMAT_CONV_SIGNED) { n = va_arg(ap, long); - else + } else { n = va_arg(ap, unsigned long); + } break; case SPRINTF_MODIFIER_LONGLONG: - if (flags & SPRINTF_FORMAT_CONV_SIGNED) + if (flags & SPRINTF_FORMAT_CONV_SIGNED) { n = va_arg(ap, long long); - else + } else { n = va_arg(ap, unsigned long long); + } break; case SPRINTF_MODIFIER_PTR: n = (unsigned long)va_arg(ap, void *); break; case SPRINTF_MODIFIER_SIZE: - if (flags & SPRINTF_FORMAT_CONV_SIGNED) + if (flags & SPRINTF_FORMAT_CONV_SIGNED) { n = va_arg(ap, ssize_t); - else + } else { n = va_arg(ap, size_t); + } break; case SPRINTF_MODIFIER_PTRDIFF: n = va_arg(ap, ptrdiff_t); break; default: - if (flags & SPRINTF_FORMAT_CONV_SIGNED) + if (flags & SPRINTF_FORMAT_CONV_SIGNED) { n = va_arg(ap, int); - else + } else { n = va_arg(ap, unsigned int); + } break; } - if ((flags & SPRINTF_FORMAT_LEFT_JUSTIFY) || (precision >= 0)) + if ((flags & SPRINTF_FORMAT_LEFT_JUSTIFY) || (precision >= 0)) { flags &= ~SPRINTF_FORMAT_ZERO_PAD; + } sign = 0; @@ -362,8 +372,9 @@ integer: width--; /* '0x' or '0X' for hexadecimal */ - if (base == 16) + if (base == 16) { width--; + } } else if (flags & SPRINTF_FORMAT_CONV_SIGNED) { if ((long long)n < 0) { sign = '-'; @@ -384,8 +395,9 @@ integer: i = 0; if (n == 0) { - if (precision != 0) + if (precision != 0) { tmp[i++] = '0'; + } } else if (base == 10) { /* * Try to avoid 64 bits operations if the processor doesn't @@ -429,15 +441,17 @@ integer: } while (n != 0); } - if (i > precision) + if (i > precision) { precision = i; + } width -= precision; if (!(flags & (SPRINTF_FORMAT_LEFT_JUSTIFY | SPRINTF_FORMAT_ZERO_PAD))) - while (width-- > 0) + while (width-- > 0) { str = sprintf_putchar(str, end, ' '); + } if (flags & SPRINTF_FORMAT_ALT_FORM) { str = sprintf_putchar(str, end, '0'); @@ -452,56 +466,66 @@ integer: if (!(flags & SPRINTF_FORMAT_LEFT_JUSTIFY)) { c = (flags & SPRINTF_FORMAT_ZERO_PAD) ? 
'0' : ' '; - while (width-- > 0) + while (width-- > 0) { str = sprintf_putchar(str, end, c); + } } - while (i < precision--) + while (i < precision--) { str = sprintf_putchar(str, end, '0'); + } - while (i-- > 0) + while (i-- > 0) { str = sprintf_putchar(str, end, tmp[i]); + } - while (width-- > 0) + while (width-- > 0) { str = sprintf_putchar(str, end, ' '); + } break; case SPRINTF_SPECIFIER_CHAR: c = (unsigned char)va_arg(ap, int); if (!(flags & SPRINTF_FORMAT_LEFT_JUSTIFY)) - while (--width > 0) + while (--width > 0) { str = sprintf_putchar(str, end, ' '); + } str = sprintf_putchar(str, end, c); - while (--width > 0) + while (--width > 0) { str = sprintf_putchar(str, end, ' '); + } break; case SPRINTF_SPECIFIER_STR: s = va_arg(ap, char *); - if (s == NULL) + if (s == NULL) { s = "(null)"; + } len = 0; for (len = 0; s[len] != '\0'; len++) - if (len == precision) + if (len == precision) { break; + } if (!(flags & SPRINTF_FORMAT_LEFT_JUSTIFY)) - while (len < width--) + while (len < width--) { str = sprintf_putchar(str, end, ' '); + } for (i = 0; i < len; i++) { str = sprintf_putchar(str, end, *s); s++; } - while (len < width--) + while (len < width--) { str = sprintf_putchar(str, end, ' '); + } break; case SPRINTF_SPECIFIER_NRCHARS: @@ -537,14 +561,16 @@ integer: break; } - if (specifier != SPRINTF_SPECIFIER_INVALID) + if (specifier != SPRINTF_SPECIFIER_INVALID) { format++; + } } - if (str < end) + if (str < end) { *str = '\0'; - else if (end != NULL) + } else if (end != NULL) { *end = '\0'; + } return str - start; } diff --git a/kern/sref.c b/kern/sref.c index d139fcc5..528eb226 100644 --- a/kern/sref.c +++ b/kern/sref.c @@ -191,10 +191,11 @@ sref_queue_push(struct sref_queue *queue, struct sref_counter *counter) { counter->next = NULL; - if (queue->last == NULL) + if (queue->last == NULL) { queue->first = counter; - else + } else { queue->last->next = counter; + } queue->last = counter; queue->size++; @@ -208,8 +209,9 @@ sref_queue_pop(struct sref_queue *queue) counter = queue->first; queue->first = counter->next; - if (queue->last == counter) + if (queue->last == counter) { queue->last = NULL; + } queue->size--; return counter; @@ -224,8 +226,9 @@ sref_queue_transfer(struct sref_queue *dest, struct sref_queue *src) static void sref_queue_concat(struct sref_queue *queue1, struct sref_queue *queue2) { - if (sref_queue_empty(queue2)) + if (sref_queue_empty(queue2)) { return; + } if (sref_queue_empty(queue1)) { sref_queue_transfer(queue1, queue2); @@ -311,10 +314,11 @@ sref_counter_add(struct sref_counter *counter, unsigned long delta) counter->value += delta; if (counter->value == 0) { - if (sref_counter_is_queued(counter)) + if (sref_counter_is_queued(counter)) { sref_counter_mark_dirty(counter); - else + } else { sref_counter_schedule_review(counter); + } } spinlock_unlock(&counter->lock); @@ -523,9 +527,9 @@ sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter) delta = sref_cache_delta(cache, sref_counter_index(counter)); - if (!sref_delta_is_valid(delta)) + if (!sref_delta_is_valid(delta)) { sref_delta_set_counter(delta, counter); - else if (sref_delta_counter(delta) != counter) { + } else if (sref_delta_counter(delta) != counter) { sref_delta_flush(delta); sref_delta_set_counter(delta, counter); evcnt_inc(&cache->ev_collision); @@ -545,8 +549,9 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue) for (i = 0; i < ARRAY_SIZE(cache->deltas); i++) { delta = sref_cache_delta(cache, i); - if (sref_delta_is_valid(delta)) + if 
(sref_delta_is_valid(delta)) { sref_delta_evict(delta); + } } cpu = cpu_id(); @@ -556,16 +561,17 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue) assert(sref_cache_is_registered(cache)); assert(cpumap_test(&sref_data.registered_cpus, cpu)); - if (!cpumap_test(&sref_data.pending_flushes, cpu)) + if (!cpumap_test(&sref_data.pending_flushes, cpu)) { sref_queue_init(queue); - else { + } else { cpumap_clear(&sref_data.pending_flushes, cpu); sref_data.nr_pending_flushes--; - if (sref_data.nr_pending_flushes != 0) + if (sref_data.nr_pending_flushes != 0) { sref_queue_init(queue); - else + } else { sref_end_epoch(queue); + } } spinlock_unlock(&sref_data.lock); @@ -604,8 +610,9 @@ sref_cache_manage(struct sref_cache *cache) static int sref_cache_check(struct sref_cache *cache) { - if (!sref_cache_is_dirty(cache)) + if (!sref_cache_is_dirty(cache)) { return 0; + } sref_cache_wakeup_manager(cache); return 1; @@ -661,8 +668,9 @@ sref_review(struct sref_queue *queue) } } - if (work_queue_nr_works(&works) != 0) + if (work_queue_nr_works(&works) != 0) { work_queue_schedule(&works, 0); + } if ((nr_dirty + nr_true) != 0) { spinlock_lock(&sref_data.lock); @@ -685,8 +693,9 @@ sref_manage(void *arg) thread_preempt_disable(); cpu_intr_save(&flags); - while (!sref_cache_is_dirty(cache)) + while (!sref_cache_is_dirty(cache)) { thread_sleep(NULL); + } cpu_intr_restore(flags); thread_preempt_enable(); @@ -722,8 +731,9 @@ sref_setup_manager(struct sref_cache *cache, unsigned int cpu) error = cpumap_create(&cpumap); - if (error) + if (error) { panic("sref: unable to create manager thread CPU map"); + } cpumap_zero(cpumap); cpumap_set(cpumap, cpu); @@ -735,8 +745,9 @@ sref_setup_manager(struct sref_cache *cache, unsigned int cpu) error = thread_create(&manager, &attr, sref_manage, cache); cpumap_destroy(cpumap); - if (error) + if (error) { panic("sref: unable to create manager thread"); + } cache->manager = manager; } @@ -746,11 +757,13 @@ sref_setup(void) { unsigned int i; - for (i = 1; i < cpu_count(); i++) + for (i = 1; i < cpu_count(); i++) { sref_cache_init(percpu_ptr(sref_cache, i), i); + } - for (i = 0; i < cpu_count(); i++) + for (i = 0; i < cpu_count(); i++) { sref_setup_manager(percpu_ptr(sref_cache, i), i); + } } void @@ -832,9 +845,9 @@ sref_unregister(void) error = ERROR_BUSY; } - if (error) + if (error) { sref_cache_mark_registered(cache); - else { + } else { cpumap_clear(&sref_data.registered_cpus, cpu); sref_data.nr_registered_cpus--; } @@ -855,8 +868,9 @@ sref_report_periodic_event(void) cache = sref_cache_get(); if (!sref_cache_is_registered(cache) - || (cache->manager == thread_self())) + || (cache->manager == thread_self())) { return; + } sref_cache_manage(cache); } diff --git a/kern/string.c b/kern/string.c index b671894f..88d251f8 100644 --- a/kern/string.c +++ b/kern/string.c @@ -33,8 +33,9 @@ memcpy(void *dest, const void *src, size_t n) dest_ptr = dest; src_ptr = src; - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { *dest_ptr++ = *src_ptr++; + } return dest; } @@ -52,14 +53,16 @@ memmove(void *dest, const void *src, size_t n) dest_ptr = dest; src_ptr = src; - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { *dest_ptr++ = *src_ptr++; + } } else { dest_ptr = dest + n - 1; src_ptr = src + n - 1; - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { *dest_ptr-- = *src_ptr--; + } } return dest; @@ -75,8 +78,9 @@ memset(void *s, int c, size_t n) buffer = s; - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { buffer[i] = c; + } return s; } @@ -93,8 +97,9 @@ 
memcmp(const void *s1, const void *s2, size_t n) a2 = s2; for (i = 0; i < n; i++) - if (a1[i] != a2[i]) + if (a1[i] != a2[i]) { return (int)a1[i] - (int)a2[i]; + } return 0; } @@ -108,8 +113,9 @@ strlen(const char *s) i = 0; - while (*s++ != '\0') + while (*s++ != '\0') { i++; + } return i; } @@ -139,8 +145,9 @@ strlcpy(char *dest, const char *src, size_t n) len = strlen(src); - if (n == 0) + if (n == 0) { goto out; + } n = (len < n) ? len : n - 1; memcpy(dest, src, n); @@ -157,8 +164,9 @@ strcmp(const char *s1, const char *s2) char c1, c2; while ((c1 = *s1) == (c2 = *s2)) { - if (c1 == '\0') + if (c1 == '\0') { return 0; + } s1++; s2++; diff --git a/kern/task.c b/kern/task.c index ede16831..47efeecd 100644 --- a/kern/task.c +++ b/kern/task.c @@ -80,8 +80,9 @@ task_create(struct task **taskp, const char *name) error = vm_map_create(&map); - if (error) + if (error) { goto error_map; + } task_init(task, name, map); @@ -122,8 +123,9 @@ task_info(struct task *task) if (task == NULL) { spinlock_lock(&task_list_lock); - list_for_each_entry(&task_list, task, node) + list_for_each_entry(&task_list, task, node) { printk("task: %s\n", task->name); + } spinlock_unlock(&task_list_lock); @@ -134,10 +136,11 @@ task_info(struct task *task) printk("task: name: %s, threads:\n", task->name); - list_for_each_entry(&task->threads, thread, task_node) + list_for_each_entry(&task->threads, thread, task_node) { printk("task: %s: %p %c %.2s:%02u %s\n", task->name, thread, thread_state_to_chr(thread), thread_schedclass_to_str(thread), thread_schedprio(thread), thread->name); + } spinlock_unlock(&task->lock); } diff --git a/kern/thread.c b/kern/thread.c index 9efcd11a..97fb7adb 100644 --- a/kern/thread.c +++ b/kern/thread.c @@ -311,8 +311,9 @@ thread_runq_init_rt(struct thread_runq *runq) rt_runq = &runq->rt_runq; rt_runq->bitmap = 0; - for (i = 0; i < ARRAY_SIZE(rt_runq->threads); i++) + for (i = 0; i < ARRAY_SIZE(rt_runq->threads); i++) { list_init(&rt_runq->threads[i]); + } } static void __init @@ -328,8 +329,9 @@ thread_ts_runq_init(struct thread_ts_runq *ts_runq) { size_t i; - for (i = 0; i < ARRAY_SIZE(ts_runq->group_array); i++) + for (i = 0; i < ARRAY_SIZE(ts_runq->group_array); i++) { thread_ts_group_init(&ts_runq->group_array[i]); + } list_init(&ts_runq->groups); list_init(&ts_runq->threads); @@ -390,13 +392,15 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread) thread_sched_ops[thread->sched_class].add(runq, thread); - if (runq->nr_threads == 0) + if (runq->nr_threads == 0) { cpumap_clear_atomic(&thread_idle_runqs, thread_runq_cpu(runq)); + } runq->nr_threads++; - if (thread->sched_class < runq->current->sched_class) + if (thread->sched_class < runq->current->sched_class) { thread_set_flag(runq->current, THREAD_YIELD); + } thread->runq = runq; } @@ -409,8 +413,9 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread) runq->nr_threads--; - if (runq->nr_threads == 0) + if (runq->nr_threads == 0) { cpumap_set_atomic(&thread_idle_runqs, thread_runq_cpu(runq)); + } thread_sched_ops[thread->sched_class].remove(runq, thread); } @@ -470,8 +475,9 @@ thread_runq_wakeup(struct thread_runq *runq, struct thread *thread) static void thread_runq_wakeup_balancer(struct thread_runq *runq) { - if (runq->balancer->state == THREAD_RUNNING) + if (runq->balancer->state == THREAD_RUNNING) { return; + } runq->balancer->state = THREAD_RUNNING; thread_runq_wakeup(runq, runq->balancer); @@ -494,8 +500,9 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev) if (prev->state != 
THREAD_RUNNING) { thread_runq_remove(runq, prev); - if ((runq->nr_threads == 0) && (prev != runq->balancer)) + if ((runq->nr_threads == 0) && (prev != runq->balancer)) { thread_runq_wakeup_balancer(runq); + } } next = thread_runq_get_next(runq); @@ -578,12 +585,14 @@ thread_sched_rt_add(struct thread_runq *runq, struct thread *thread) threads = &rt_runq->threads[thread->rt_data.priority]; list_insert_tail(threads, &thread->rt_data.node); - if (list_singular(threads)) + if (list_singular(threads)) { rt_runq->bitmap |= (1ULL << thread->rt_data.priority); + } if ((thread->sched_class == runq->current->sched_class) - && (thread->rt_data.priority > runq->current->rt_data.priority)) + && (thread->rt_data.priority > runq->current->rt_data.priority)) { thread_set_flag(runq->current, THREAD_YIELD); + } } static void @@ -596,8 +605,9 @@ thread_sched_rt_remove(struct thread_runq *runq, struct thread *thread) threads = &rt_runq->threads[thread->rt_data.priority]; list_remove(&thread->rt_data.node); - if (list_empty(threads)) + if (list_empty(threads)) { rt_runq->bitmap &= ~(1ULL << thread->rt_data.priority); + } } static void @@ -616,8 +626,9 @@ thread_sched_rt_get_next(struct thread_runq *runq) rt_runq = &runq->rt_runq; - if (rt_runq->bitmap == 0) + if (rt_runq->bitmap == 0) { return NULL; + } priority = THREAD_SCHED_RT_PRIO_MAX - __builtin_clzll(rt_runq->bitmap); threads = &rt_runq->threads[priority]; @@ -632,13 +643,15 @@ thread_sched_rt_tick(struct thread_runq *runq, struct thread *thread) { (void)runq; - if (thread->sched_policy != THREAD_SCHED_POLICY_RR) + if (thread->sched_policy != THREAD_SCHED_POLICY_RR) { return; + } thread->rt_data.time_slice--; - if (thread->rt_data.time_slice > 0) + if (thread->rt_data.time_slice > 0) { return; + } thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE; thread_set_flag(thread, THREAD_YIELD); @@ -669,16 +682,18 @@ thread_sched_ts_select_runq(struct thread *thread) int i; cpumap_for_each(&thread_idle_runqs, i) { - if (!cpumap_test(&thread->cpumap, i)) + if (!cpumap_test(&thread->cpumap, i)) { continue; + } runq = percpu_ptr(thread_runq, i); spinlock_lock(&runq->lock); /* The run queue really is idle, return it */ - if (runq->current == runq->idler) + if (runq->current == runq->idler) { goto out; + } spinlock_unlock(&runq->lock); } @@ -686,8 +701,9 @@ thread_sched_ts_select_runq(struct thread *thread) runq = NULL; cpumap_for_each(&thread_active_runqs, i) { - if (!cpumap_test(&thread->cpumap, i)) + if (!cpumap_test(&thread->cpumap, i)) { continue; + } tmp = percpu_ptr(thread_runq, i); @@ -740,8 +756,9 @@ thread_sched_ts_enqueue_scale(unsigned int work, unsigned int old_weight, assert(old_weight != 0); #ifndef __LP64__ - if (likely((work < 0x10000) && (new_weight < 0x10000))) + if (likely((work < 0x10000) && (new_weight < 0x10000))) { return (work * new_weight) / old_weight; + } #endif /* __LP64__ */ return (unsigned int)(((unsigned long long)work * new_weight) / old_weight); @@ -768,15 +785,16 @@ thread_sched_ts_enqueue(struct thread_ts_runq *ts_runq, unsigned long round, while (!list_end(&ts_runq->groups, node)) { tmp = list_entry(node, struct thread_ts_group, node); - if (tmp->weight >= group_weight) + if (tmp->weight >= group_weight) { break; + } node = list_prev(node); } - if (group->weight == 0) + if (group->weight == 0) { list_insert_after(node, &group->node); - else if (node != init_node) { + } else if (node != init_node) { list_remove(&group->node); list_insert_after(node, &group->node); } @@ -792,9 +810,9 @@ thread_sched_ts_enqueue(struct 
thread_ts_runq *ts_runq, unsigned long round, } else { unsigned int group_work, thread_work; - if (ts_runq->weight == 0) + if (ts_runq->weight == 0) { thread_work = 0; - else { + } else { group_work = (group->weight == 0) ? thread_sched_ts_enqueue_scale(ts_runq->work, ts_runq->weight, @@ -832,8 +850,9 @@ thread_sched_ts_restart(struct thread_runq *runq) assert(node != NULL); ts_runq->current = list_entry(node, struct thread_ts_group, node); - if (runq->current->sched_class == THREAD_SCHED_CLASS_TS) + if (runq->current->sched_class == THREAD_SCHED_CLASS_TS) { thread_set_flag(runq->current, THREAD_YIELD); + } } static void @@ -841,14 +860,16 @@ thread_sched_ts_add(struct thread_runq *runq, struct thread *thread) { unsigned int total_weight; - if (runq->ts_weight == 0) + if (runq->ts_weight == 0) { runq->ts_round = thread_ts_highest_round; + } total_weight = runq->ts_weight + thread->ts_data.weight; /* TODO Limit the maximum number of threads to prevent this situation */ - if (total_weight < runq->ts_weight) + if (total_weight < runq->ts_weight) { panic("thread: weight overflow"); + } runq->ts_weight = total_weight; thread_sched_ts_enqueue(runq->ts_runq_active, runq->ts_round, thread); @@ -876,17 +897,18 @@ thread_sched_ts_dequeue(struct thread *thread) group->weight -= thread->ts_data.weight; ts_runq->nr_threads--; - if (group->weight == 0) + if (group->weight == 0) { list_remove(&group->node); - else { + } else { node = list_next(&group->node); init_node = node; while (!list_end(&ts_runq->groups, node)) { tmp = list_entry(node, struct thread_ts_group, node); - if (tmp->weight <= group->weight) + if (tmp->weight <= group->weight) { break; + } node = list_next(node); } @@ -908,10 +930,11 @@ thread_sched_ts_remove(struct thread_runq *runq, struct thread *thread) thread_sched_ts_dequeue(thread); if (ts_runq == runq->ts_runq_active) { - if (ts_runq->nr_threads == 0) + if (ts_runq->nr_threads == 0) { thread_runq_wakeup_balancer(runq); - else + } else { thread_sched_ts_restart(runq); + } } } @@ -926,8 +949,9 @@ thread_sched_ts_deactivate(struct thread_runq *runq, struct thread *thread) thread->ts_data.work -= thread->ts_data.weight; thread_sched_ts_enqueue(runq->ts_runq_expired, runq->ts_round + 1, thread); - if (runq->ts_runq_active->nr_threads == 0) + if (runq->ts_runq_active->nr_threads == 0) { thread_runq_wakeup_balancer(runq); + } } static void @@ -940,8 +964,9 @@ thread_sched_ts_put_prev(struct thread_runq *runq, struct thread *thread) group = &ts_runq->group_array[thread->ts_data.priority]; list_insert_tail(&group->threads, &thread->ts_data.group_node); - if (thread->ts_data.work >= thread->ts_data.weight) + if (thread->ts_data.work >= thread->ts_data.weight) { thread_sched_ts_deactivate(runq, thread); + } } static int @@ -975,8 +1000,9 @@ thread_sched_ts_get_next(struct thread_runq *runq) ts_runq = runq->ts_runq_active; - if (ts_runq->nr_threads == 0) + if (ts_runq->nr_threads == 0) { return NULL; + } group = ts_runq->current; node = list_next(&group->node); @@ -987,9 +1013,9 @@ thread_sched_ts_get_next(struct thread_runq *runq) } else { next = list_entry(node, struct thread_ts_group, node); - if (thread_sched_ts_ratio_exceeded(group, next)) + if (thread_sched_ts_ratio_exceeded(group, next)) { group = next; - else { + } else { node = list_first(&ts_runq->groups); group = list_entry(node, struct thread_ts_group, node); } @@ -1030,8 +1056,9 @@ thread_sched_ts_start_next_round(struct thread_runq *runq) runq->ts_round++; delta = (long)(runq->ts_round - thread_ts_highest_round); - if (delta > 
0) + if (delta > 0) { thread_ts_highest_round = runq->ts_round; + } thread_sched_ts_restart(runq); } @@ -1046,20 +1073,23 @@ thread_sched_ts_balance_eligible(struct thread_runq *runq, { unsigned int nr_threads; - if (runq->ts_weight == 0) + if (runq->ts_weight == 0) { return 0; + } if ((runq->ts_round != highest_round) - && (runq->ts_round != (highest_round - 1))) + && (runq->ts_round != (highest_round - 1))) { return 0; + } nr_threads = runq->ts_runq_active->nr_threads + runq->ts_runq_expired->nr_threads; if ((nr_threads == 0) || ((nr_threads == 1) - && (runq->current->sched_class == THREAD_SCHED_CLASS_TS))) + && (runq->current->sched_class == THREAD_SCHED_CLASS_TS))) { return 0; + } return 1; } @@ -1083,8 +1113,9 @@ thread_sched_ts_balance_scan(struct thread_runq *runq, cpumap_for_each(&thread_active_runqs, i) { tmp = percpu_ptr(thread_runq, i); - if (tmp == runq) + if (tmp == runq) { continue; + } spinlock_lock(&tmp->lock); @@ -1107,8 +1138,9 @@ thread_sched_ts_balance_scan(struct thread_runq *runq, spinlock_unlock(&tmp->lock); } - if (remote_runq != NULL) + if (remote_runq != NULL) { spinlock_unlock(&remote_runq->lock); + } cpu_intr_restore(flags); thread_preempt_enable(); @@ -1129,8 +1161,9 @@ thread_sched_ts_balance_pull(struct thread_runq *runq, list_for_each_entry_safe(&ts_runq->threads, thread, tmp, ts_data.runq_node) { - if (thread == remote_runq->current) + if (thread == remote_runq->current) { continue; + } /* * The pinned counter is changed without explicit synchronization. @@ -1141,11 +1174,13 @@ thread_sched_ts_balance_pull(struct thread_runq *runq, * changing the pinned counter and setting the current thread of a * run queue. */ - if (thread->pinned) + if (thread->pinned) { continue; + } - if (!cpumap_test(&thread->cpumap, cpu)) + if (!cpumap_test(&thread->cpumap, cpu)) { continue; + } /* * Make sure at least one thread is pulled if possible. 
If one or more @@ -1153,8 +1188,9 @@ thread_sched_ts_balance_pull(struct thread_runq *runq, */ if ((nr_pulls != 0) && ((runq->ts_weight + thread->ts_data.weight) - > (remote_runq->ts_weight - thread->ts_data.weight))) + > (remote_runq->ts_weight - thread->ts_data.weight))) { break; + } thread_runq_remove(remote_runq, thread); @@ -1164,8 +1200,9 @@ thread_sched_ts_balance_pull(struct thread_runq *runq, thread_runq_add(runq, thread); nr_pulls++; - if (nr_pulls == THREAD_MAX_MIGRATIONS) + if (nr_pulls == THREAD_MAX_MIGRATIONS) { break; + } } return nr_pulls; @@ -1180,14 +1217,16 @@ thread_sched_ts_balance_migrate(struct thread_runq *runq, nr_pulls = 0; - if (!thread_sched_ts_balance_eligible(remote_runq, highest_round)) + if (!thread_sched_ts_balance_eligible(remote_runq, highest_round)) { goto out; + } nr_pulls = thread_sched_ts_balance_pull(runq, remote_runq, remote_runq->ts_runq_active, 0); - if (nr_pulls == THREAD_MAX_MIGRATIONS) + if (nr_pulls == THREAD_MAX_MIGRATIONS) { goto out; + } /* * Threads in the expired queue of a processor in round highest are @@ -1224,8 +1263,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags) highest_round = thread_ts_highest_round; if ((runq->ts_round != highest_round) - && (runq->ts_runq_expired->nr_threads != 0)) + && (runq->ts_runq_expired->nr_threads != 0)) { goto no_migration; + } spinlock_unlock_intr_restore(&runq->lock, *flags); thread_preempt_enable(); @@ -1240,8 +1280,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags) highest_round); spinlock_unlock(&remote_runq->lock); - if (nr_migrations != 0) + if (nr_migrations != 0) { return; + } spinlock_unlock_intr_restore(&runq->lock, *flags); thread_preempt_enable(); @@ -1256,8 +1297,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags) cpumap_for_each(&thread_active_runqs, i) { remote_runq = percpu_ptr(thread_runq, i); - if (remote_runq == runq) + if (remote_runq == runq) { continue; + } thread_preempt_disable(); cpu_intr_save(flags); @@ -1266,8 +1308,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags) highest_round); spinlock_unlock(&remote_runq->lock); - if (nr_migrations != 0) + if (nr_migrations != 0) { return; + } spinlock_unlock_intr_restore(&runq->lock, *flags); thread_preempt_enable(); @@ -1285,8 +1328,9 @@ no_migration: * queue lock must remain held until the next scheduling decision to * prevent a remote balancer thread from stealing active threads. 
*/ - if (runq->ts_runq_active->nr_threads == 0) + if (runq->ts_runq_active->nr_threads == 0) { thread_sched_ts_start_next_round(runq); + } } static void @@ -1456,8 +1500,9 @@ thread_destroy_tsd(struct thread *thread) thread->tsd[i] = NULL; thread_dtors[i](ptr); - if (thread->tsd[i] == NULL) + if (thread->tsd[i] == NULL) { i++; + } } } @@ -1512,13 +1557,15 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr, thread->fn = fn; thread->arg = arg; - if (attr->flags & THREAD_ATTR_DETACHED) + if (attr->flags & THREAD_ATTR_DETACHED) { thread->flags |= THREAD_DETACHED; + } error = tcb_init(&thread->tcb, stack, thread_main); - if (error) + if (error) { goto error_tsd; + } task_add_thread(task, thread); @@ -1541,8 +1588,9 @@ thread_lock_runq(struct thread *thread, unsigned long *flags) spinlock_lock_intr_save(&runq->lock, flags); - if (runq == thread->runq) + if (runq == thread->runq) { return runq; + } spinlock_unlock_intr_restore(&runq->lock, *flags); } @@ -1579,8 +1627,9 @@ thread_join_common(struct thread *thread) mutex_lock(&thread->join_lock); - while (!thread->exited) + while (!thread->exited) { condition_wait(&thread->join_cond, &thread->join_lock); + } mutex_unlock(&thread->join_lock); @@ -1598,8 +1647,9 @@ thread_reap(void *arg) for (;;) { mutex_lock(&thread_reap_lock); - while (list_empty(&thread_reap_list)) + while (list_empty(&thread_reap_list)) { condition_wait(&thread_reap_cond, &thread_reap_lock); + } list_set_head(&zombies, &thread_reap_list); list_init(&thread_reap_list); @@ -1630,8 +1680,9 @@ thread_setup_reaper(void) thread_attr_init(&attr, "x15_thread_reap"); error = thread_create(&thread, &attr, thread_reap, NULL); - if (error) + if (error) { panic("thread: unable to create reaper thread"); + } } static void @@ -1643,13 +1694,15 @@ thread_balance_idle_tick(struct thread_runq *runq) * Interrupts can occur early, at a time the balancer thread hasn't been * created yet. 
@@ -1643,13 +1694,15 @@ thread_balance_idle_tick(struct thread_runq *runq)
     /*
      * Interrupts can occur early, at a time the balancer thread hasn't been
      * created yet.
      */
-    if (runq->balancer == NULL)
+    if (runq->balancer == NULL) {
         return;
+    }
 
     runq->idle_balance_ticks--;
 
-    if (runq->idle_balance_ticks == 0)
+    if (runq->idle_balance_ticks == 0) {
         thread_runq_wakeup_balancer(runq);
+    }
 }
 
 static void
@@ -1692,8 +1745,9 @@ thread_setup_balancer(struct thread_runq *runq)
     error = cpumap_create(&cpumap);
 
-    if (error)
+    if (error) {
         panic("thread: unable to create balancer thread CPU map");
+    }
 
     cpumap_zero(cpumap);
     cpumap_set(cpumap, thread_runq_cpu(runq));
@@ -1706,8 +1760,9 @@ thread_setup_balancer(struct thread_runq *runq)
     error = thread_create(&balancer, &attr, thread_balance, runq);
     cpumap_destroy(cpumap);
 
-    if (error)
+    if (error) {
         panic("thread: unable to create balancer thread");
+    }
 
     runq->balancer = balancer;
 }
@@ -1764,20 +1819,23 @@ thread_setup_idler(struct thread_runq *runq)
     error = cpumap_create(&cpumap);
 
-    if (error)
+    if (error) {
         panic("thread: unable to allocate idler thread CPU map");
+    }
 
     cpumap_zero(cpumap);
     cpumap_set(cpumap, thread_runq_cpu(runq));
     idler = kmem_cache_alloc(&thread_cache);
 
-    if (idler == NULL)
+    if (idler == NULL) {
         panic("thread: unable to allocate idler thread");
+    }
 
     stack = kmem_cache_alloc(&thread_stack_cache);
 
-    if (stack == NULL)
+    if (stack == NULL) {
         panic("thread: unable to allocate idler thread stack");
+    }
 
     snprintf(name, sizeof(name), "x15_thread_idle/%u", thread_runq_cpu(runq));
     thread_attr_init(&attr, name);
@@ -1785,8 +1843,9 @@ thread_setup_idler(struct thread_runq *runq)
     thread_attr_set_policy(&attr, THREAD_SCHED_POLICY_IDLE);
     error = thread_init(idler, stack, &attr, thread_idle, NULL);
 
-    if (error)
+    if (error) {
         panic("thread: unable to initialize idler thread");
+    }
 
     cpumap_destroy(cpumap);
@@ -1808,8 +1867,9 @@ thread_setup(void)
 {
     int cpu;
 
-    for (cpu = 1; (unsigned int)cpu < cpu_count(); cpu++)
+    for (cpu = 1; (unsigned int)cpu < cpu_count(); cpu++) {
         thread_bootstrap_common(cpu);
+    }
 
     kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
                     CPU_L1_SIZE, NULL, 0);
@@ -1818,8 +1878,9 @@ thread_setup(void)
     thread_setup_reaper();
 
-    cpumap_for_each(&thread_active_runqs, cpu)
+    cpumap_for_each(&thread_active_runqs, cpu) {
         thread_setup_runq(percpu_ptr(thread_runq, cpu));
+    }
 }
 
 int
@@ -1834,8 +1895,9 @@ thread_create(struct thread **threadp, const struct thread_attr *attr,
     if (attr->cpumap != NULL) {
         error = cpumap_check(attr->cpumap);
 
-        if (error)
+        if (error) {
             return error;
+        }
     }
 
     thread = kmem_cache_alloc(&thread_cache);
@@ -1854,8 +1916,9 @@ thread_create(struct thread **threadp, const struct thread_attr *attr,
     error = thread_init(thread, stack, attr, fn, arg);
 
-    if (error)
+    if (error) {
         goto error_init;
+    }
 
     /*
      * The new thread address must be written before the thread is started
@@ -1994,9 +2057,9 @@ thread_wakeup(struct thread *thread)
     thread_preempt_disable();
     cpu_intr_save(&flags);
 
-    if (!thread->pinned)
+    if (!thread->pinned) {
         runq = thread_sched_ops[thread->sched_class].select_runq(thread);
-    else {
+    } else {
         runq = thread->runq;
         spinlock_lock(&runq->lock);
     }
@@ -2039,8 +2102,9 @@ thread_yield(void)
     thread = thread_self();
 
-    if (!thread_preempt_enabled())
+    if (!thread_preempt_enabled()) {
         return;
+    }
 
     do {
         thread_preempt_disable();
@@ -2082,8 +2146,9 @@ thread_tick_intr(void)
     spinlock_lock(&runq->lock);
 
-    if (runq->nr_threads == 0)
+    if (runq->nr_threads == 0) {
         thread_balance_idle_tick(runq);
+    }
 
     thread_sched_ops[thread->sched_class].tick(runq, thread);
@@ -2142,8 +2207,9 @@ thread_key_create(unsigned int *keyp, thread_dtor_fn_t dtor)
     key = atomic_fetchadd_uint(&thread_nr_keys, 1);
 
-    if (key >= THREAD_KEYS_MAX)
+    if (key >= THREAD_KEYS_MAX) {
         panic("thread: maximum number of keys exceeded");
+    }
 
     thread_dtors[key] = dtor;
     *keyp = key;
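Note: thread_key_create above hands out thread-specific data keys with a single atomic fetch-and-add, so concurrent callers get distinct slots without taking a lock. A minimal userspace sketch of the same scheme with C11 atomics (KEYS_MAX and the names below are illustrative, and the kernel panics where this sketch asserts):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define KEYS_MAX 16 /* illustrative; the kernel uses THREAD_KEYS_MAX */

typedef void (*dtor_fn_t)(void *);

static atomic_uint nr_keys;
static dtor_fn_t dtors[KEYS_MAX];

/*
 * atomic_fetch_add returns the previous value, so each caller receives
 * a unique index; the table is fixed-size and exhaustion is fatal.
 */
static void
key_create(unsigned int *keyp, dtor_fn_t dtor)
{
    unsigned int key;

    key = atomic_fetch_add(&nr_keys, 1);
    assert(key < KEYS_MAX);

    dtors[key] = dtor;
    *keyp = key;
}

static void
noop_dtor(void *ptr)
{
    (void)ptr;
}

int
main(void)
{
    unsigned int k1, k2;

    key_create(&k1, noop_dtor);
    key_create(&k2, noop_dtor);
    printf("%u %u\n", k1, k2); /* prints "0 1" */
    return 0;
}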
diff --git a/kern/thread.h b/kern/thread.h
index 36aea2ff..2c483ab4 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -409,8 +409,9 @@ thread_schedule(void)
 {
     barrier();
 
-    if (likely(!thread_test_flag(thread_self(), THREAD_YIELD)))
+    if (likely(!thread_test_flag(thread_self(), THREAD_YIELD))) {
         return;
+    }
 
     thread_yield();
 }
diff --git a/kern/work.c b/kern/work.c
index f89dd389..f3d0848a 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -192,8 +192,9 @@ work_pool_init(struct work_pool *pool, unsigned int cpu, int flags)
     id = work_pool_alloc_id(pool);
     error = work_thread_create(pool, id);
 
-    if (error)
+    if (error) {
         goto error_thread;
+    }
 
     return;
 
@@ -212,9 +213,9 @@ work_pool_cpu_select(int flags)
 static void
 work_pool_acquire(struct work_pool *pool, unsigned long *flags)
 {
-    if (pool->flags & WORK_PF_GLOBAL)
+    if (pool->flags & WORK_PF_GLOBAL) {
         spinlock_lock_intr_save(&pool->lock, flags);
-    else {
+    } else {
         thread_preempt_disable();
         cpu_intr_save(flags);
     }
@@ -223,9 +224,9 @@ work_pool_acquire(struct work_pool *pool, unsigned long *flags)
 static void
 work_pool_release(struct work_pool *pool, unsigned long flags)
 {
-    if (pool->flags & WORK_PF_GLOBAL)
+    if (pool->flags & WORK_PF_GLOBAL) {
         spinlock_unlock_intr_restore(&pool->lock, flags);
-    else {
+    } else {
         cpu_intr_restore(flags);
         thread_preempt_enable();
     }
@@ -242,8 +243,9 @@ static struct work *
 work_pool_pop_work(struct work_pool *pool)
 {
     if (!(pool->flags & WORK_PF_GLOBAL)) {
-        if (work_queue_nr_works(&pool->queue1) != 0)
+        if (work_queue_nr_works(&pool->queue1) != 0) {
             return work_queue_pop(&pool->queue1);
+        }
     }
 
     return work_queue_pop(&pool->queue0);
@@ -252,11 +254,13 @@ work_pool_pop_work(struct work_pool *pool)
 static void
 work_pool_wakeup_manager(struct work_pool *pool)
 {
-    if (work_pool_nr_works(pool) == 0)
+    if (work_pool_nr_works(pool) == 0) {
         return;
+    }
 
-    if ((pool->manager != NULL) && (pool->manager->thread != thread_self()))
+    if ((pool->manager != NULL) && (pool->manager->thread != thread_self())) {
         thread_wakeup(pool->manager->thread);
+    }
 }
 
 static void
@@ -268,8 +272,9 @@ work_pool_shift_queues(struct work_pool *pool, struct work_queue *old_queue)
     work_queue_transfer(&pool->queue1, &pool->queue0);
     work_queue_init(&pool->queue0);
 
-    if (work_queue_nr_works(old_queue) != 0)
+    if (work_queue_nr_works(old_queue) != 0) {
         evcnt_inc(&pool->ev_transfer);
+    }
 }
 
 static void
@@ -308,9 +313,9 @@ work_process(void *arg)
             list_insert_tail(&pool->available_threads, &self->node);
             pool->nr_available_threads++;
 
-            do
+            do {
                 thread_sleep(lock);
-            while (pool->manager != NULL);
+            } while (pool->manager != NULL);
 
             list_remove(&self->node);
             pool->nr_available_threads--;
@@ -338,14 +343,15 @@ work_process(void *arg)
         }
 
         if (work_pool_nr_works(pool) == 0) {
-            if (pool->nr_threads > WORK_THREADS_SPARE)
+            if (pool->nr_threads > WORK_THREADS_SPARE) {
                 break;
+            }
 
             pool->manager = self;
 
-            do
+            do {
                 thread_sleep(lock);
-            while (work_pool_nr_works(pool) == 0);
+            } while (work_pool_nr_works(pool) == 0);
 
             pool->manager = NULL;
         }
@@ -396,8 +402,9 @@ work_thread_create(struct work_pool *pool, unsigned int id)
     worker = kmem_cache_alloc(&work_thread_cache);
 
-    if (worker == NULL)
+    if (worker == NULL) {
         return ERROR_NOMEM;
+    }
 
     worker->pool = pool;
     worker->id = id;
@@ -419,8 +426,9 @@ work_thread_create(struct work_pool *pool, unsigned int id)
         error = cpumap_create(&cpumap);
 
-        if (error)
+        if (error) {
             goto error_cpumap;
+        }
 
         pool_id = work_pool_cpu_id(pool);
         cpumap_zero(cpumap);
@@ -432,16 +440,19 @@ work_thread_create(struct work_pool *pool, unsigned int id)
     thread_attr_init(&attr, name);
     thread_attr_set_priority(&attr, priority);
 
-    if (cpumap != NULL)
+    if (cpumap != NULL) {
         thread_attr_set_cpumap(&attr, cpumap);
+    }
 
     error = thread_create(&worker->thread, &attr, work_process, worker);
 
-    if (cpumap != NULL)
+    if (cpumap != NULL) {
         cpumap_destroy(cpumap);
+    }
 
-    if (error)
+    if (error) {
         goto error_thread;
+    }
 
     return 0;
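Note: work_pool_acquire and work_pool_release above pick their synchronization by pool type: a global pool is shared between CPUs and takes an interrupt-safe spinlock, while a per-CPU pool is only ever touched locally, so disabling preemption and interrupts suffices. A compilable sketch of that split, with stub primitives standing in for the kernel's and an assumed flag value:

#include <stdbool.h>

#define WORK_PF_GLOBAL 0x1 /* assumed value, for illustration only */

/* Stubs standing in for the kernel primitives used in work.c above. */
struct spinlock { int dummy; };
static void spinlock_lock_intr_save(struct spinlock *l, unsigned long *f) { (void)l; *f = 0; }
static void spinlock_unlock_intr_restore(struct spinlock *l, unsigned long f) { (void)l; (void)f; }
static void thread_preempt_disable(void) {}
static void thread_preempt_enable(void) {}
static void cpu_intr_save(unsigned long *f) { *f = 0; }
static void cpu_intr_restore(unsigned long f) { (void)f; }

struct work_pool {
    int flags;
    struct spinlock lock;
};

/* Global pools need the lock; per-CPU pools only need to keep the
 * current CPU from being preempted or interrupted while they run. */
static void
pool_acquire(struct work_pool *pool, unsigned long *flags)
{
    if (pool->flags & WORK_PF_GLOBAL) {
        spinlock_lock_intr_save(&pool->lock, flags);
    } else {
        thread_preempt_disable();
        cpu_intr_save(flags);
    }
}

/* Release mirrors acquire, undoing the operations in reverse order. */
static void
pool_release(struct work_pool *pool, unsigned long flags)
{
    if (pool->flags & WORK_PF_GLOBAL) {
        spinlock_unlock_intr_restore(&pool->lock, flags);
    } else {
        cpu_intr_restore(flags);
        thread_preempt_enable();
    }
}

int
main(void)
{
    struct work_pool pool = { .flags = WORK_PF_GLOBAL };
    unsigned long flags;

    pool_acquire(&pool, &flags);
    pool_release(&pool, flags);
    return 0;
}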
diff --git a/kern/work.h b/kern/work.h
index fb22db90..6e3876f0 100644
--- a/kern/work.h
+++ b/kern/work.h
@@ -78,10 +78,11 @@ work_queue_push(struct work_queue *queue, struct work *work)
 {
     work->next = NULL;
 
-    if (queue->last == NULL)
+    if (queue->last == NULL) {
         queue->first = work;
-    else
+    } else {
         queue->last->next = work;
+    }
 
     queue->last = work;
     queue->nr_works++;
@@ -95,8 +96,9 @@ work_queue_pop(struct work_queue *queue)
     work = queue->first;
     queue->first = work->next;
 
-    if (queue->last == work)
+    if (queue->last == work) {
         queue->last = NULL;
+    }
 
     queue->nr_works--;
     return work;
@@ -111,8 +113,9 @@ work_queue_transfer(struct work_queue *dest, struct work_queue *src)
 static inline void
 work_queue_concat(struct work_queue *queue1, struct work_queue *queue2)
 {
-    if (queue2->nr_works == 0)
+    if (queue2->nr_works == 0) {
         return;
+    }
 
     if (queue1->nr_works == 0) {
         *queue1 = *queue2;
diff --git a/kern/xcall.c b/kern/xcall.c
index 7ea299db..398001d1 100644
--- a/kern/xcall.c
+++ b/kern/xcall.c
@@ -101,8 +101,9 @@ xcall_setup(void)
 {
     unsigned int i;
 
-    for (i = 0; i < cpu_count(); i++)
+    for (i = 0; i < cpu_count(); i++) {
         xcall_cpu_data_init(percpu_ptr(xcall_cpu_data, i));
+    }
 }
 
 void
@@ -139,8 +140,9 @@ xcall_call(xcall_fn_t fn, void *arg, unsigned int cpu)
     cpu_send_xcall(cpu);
 
-    while (remote_data->recv_call != NULL)
+    while (remote_data->recv_call != NULL) {
         cpu_pause();
+    }
 
     spinlock_unlock(&remote_data->lock);
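Note: the loop bracketed in xcall_call above is the sender side of a busy-wait handshake: the caller publishes the call, sends the IPI, then spins until the remote processor clears the slot to acknowledge completion. A simplified single-slot sketch with C11 atomics (the names are stand-ins, and the real code additionally serializes senders with a per-CPU spinlock):

#include <stdatomic.h>
#include <stddef.h>

typedef void (*xcall_fn_t)(void *);

struct call {
    xcall_fn_t fn;
    void *arg;
};

/* Stand-in for the per-CPU receive slot (recv_call in xcall.c above). */
static struct call * _Atomic recv_call;

/* Stand-in for cpu_pause(): on x86 this would issue the "pause" hint. */
static void cpu_pause(void) {}

/* Sender: publish the call, then spin until the receiver clears it. */
void
send_call(struct call *call)
{
    atomic_store(&recv_call, call);
    /* cpu_send_xcall(cpu) would raise the IPI here. */

    while (atomic_load(&recv_call) != NULL) {
        cpu_pause();
    }
}

/* Receiver (interrupt handler): run the call, then acknowledge by
 * clearing the slot, which releases the spinning sender. */
void
receive_call(void)
{
    struct call *call = atomic_load(&recv_call);

    call->fn(call->arg);
    atomic_store(&recv_call, NULL);
}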