author     Richard Braun <rbraun@sceen.net>    2016-12-09 01:41:06 +0100
committer  Richard Braun <rbraun@sceen.net>    2016-12-09 01:41:06 +0100
commit     39c13b3b84b34e0938220126c8f147d2b0b6ac89 (patch)
tree       92accef33f04f49a01765e00ec026b092ae0c8ca
parent     84c92cd2be8bc4aea6c14a186f79c2277f0fd4aa (diff)
Force brackets around one-line conditional statements
This change was done using astyle, with a few manual edits here and there.
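For reference, here is a minimal sketch of the rule this commit enforces, written as two hypothetical functions (the names and bodies are illustrative, not taken from the tree). It shows the same transformation the diff applies everywhere: one-line conditional bodies gain braces while the logic stays identical. The exact astyle option used is not recorded in the message; presumably it is the brace-adding option (--add-brackets, -j, in the astyle 2.x series), but that is an assumption.

    /* Before: unbraced one-line conditional bodies (the old style). */
    static int
    check_arg_before(const void *ptr)
    {
        if (ptr == NULL)
            return -1;

        return 0;
    }

    /* After: every conditional body is wrapped in braces, behavior unchanged. */
    static int
    check_arg_after(const void *ptr)
    {
        if (ptr == NULL) {
            return -1;
        }

        return 0;
    }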
-rw-r--r--  arch/x86/machine/acpimp.c  66
-rw-r--r--  arch/x86/machine/biosmem.c  80
-rw-r--r--  arch/x86/machine/boot.c  65
-rw-r--r--  arch/x86/machine/cga.c  10
-rw-r--r--  arch/x86/machine/cpu.c  43
-rw-r--r--  arch/x86/machine/cpu.h  11
-rw-r--r--  arch/x86/machine/lapic.c  9
-rw-r--r--  arch/x86/machine/pic.c  9
-rw-r--r--  arch/x86/machine/pit.c  3
-rw-r--r--  arch/x86/machine/pmap.c  159
-rw-r--r--  arch/x86/machine/strace.c  28
-rw-r--r--  arch/x86/machine/string.c  3
-rw-r--r--  arch/x86/machine/tcb.c  3
-rw-r--r--  arch/x86/machine/trap.c  9
-rw-r--r--  kern/bitmap.c  28
-rw-r--r--  kern/bitmap.h  24
-rw-r--r--  kern/condition.c  9
-rw-r--r--  kern/cpumap.c  9
-rw-r--r--  kern/error.c  3
-rw-r--r--  kern/evcnt.c  3
-rw-r--r--  kern/kmem.c  144
-rw-r--r--  kern/list.h  3
-rw-r--r--  kern/llsync.c  26
-rw-r--r--  kern/llsync.h  6
-rw-r--r--  kern/log2.h  3
-rw-r--r--  kern/mutex.h  9
-rw-r--r--  kern/panic.c  6
-rw-r--r--  kern/percpu.c  9
-rw-r--r--  kern/printk.c  3
-rw-r--r--  kern/rbtree.c  90
-rw-r--r--  kern/spinlock.h  3
-rw-r--r--  kern/spinlock_i.h  3
-rw-r--r--  kern/sprintf.c  96
-rw-r--r--  kern/sref.c  60
-rw-r--r--  kern/string.c  24
-rw-r--r--  kern/task.c  9
-rw-r--r--  kern/thread.c  220
-rw-r--r--  kern/thread.h  3
-rw-r--r--  kern/work.c  49
-rw-r--r--  kern/work.h  11
-rw-r--r--  kern/xcall.c  6
-rw-r--r--  test/test_llsync_defer.c  24
-rw-r--r--  test/test_pmap_update_mp.c  9
-rw-r--r--  test/test_sref_dirty_zeroes.c  6
-rw-r--r--  test/test_sref_noref.c  18
-rw-r--r--  test/test_xcall.c  3
-rw-r--r--  vm/vm_kmem.c  30
-rw-r--r--  vm/vm_map.c  131
-rw-r--r--  vm/vm_page.c  67
49 files changed, 1059 insertions(+), 586 deletions(-)
diff --git a/arch/x86/machine/acpimp.c b/arch/x86/machine/acpimp.c
index b8d9cd26..b294da92 100644
--- a/arch/x86/machine/acpimp.c
+++ b/arch/x86/machine/acpimp.c
@@ -140,8 +140,9 @@ acpimp_table_required(const struct acpimp_sdth *table)
acpimp_table_sig(table, sig);
for (i = 0; i < ARRAY_SIZE(acpimp_table_addrs); i++)
- if (strcmp(sig, acpimp_table_addrs[i].sig) == 0)
+ if (strcmp(sig, acpimp_table_addrs[i].sig) == 0) {
return 1;
+ }
return 0;
}
@@ -175,8 +176,9 @@ acpimp_lookup_table(const char *sig)
size_t i;
for (i = 0; i < ARRAY_SIZE(acpimp_table_addrs); i++)
- if (strcmp(sig, acpimp_table_addrs[i].sig) == 0)
+ if (strcmp(sig, acpimp_table_addrs[i].sig) == 0) {
return acpimp_table_addrs[i].table;
+ }
return NULL;
}
@@ -205,8 +207,9 @@ acpimp_free_tables(void)
for (i = 0; i < ARRAY_SIZE(acpimp_table_addrs); i++) {
table = acpimp_table_addrs[i].table;
- if (table != NULL)
+ if (table != NULL) {
kmem_free(table, table->length);
+ }
}
}
@@ -220,8 +223,9 @@ acpimp_checksum(const void *ptr, size_t size)
bytes = ptr;
checksum = 0;
- for (i = 0; i < size; i++)
+ for (i = 0; i < size; i++) {
checksum += bytes[i];
+ }
return checksum;
}
@@ -231,13 +235,15 @@ acpimp_check_rsdp(const struct acpimp_rsdp *rsdp)
{
unsigned int checksum;
- if (memcmp(rsdp->signature, ACPIMP_RSDP_SIG, sizeof(rsdp->signature)) != 0)
+ if (memcmp(rsdp->signature, ACPIMP_RSDP_SIG, sizeof(rsdp->signature)) != 0) {
return -1;
+ }
checksum = acpimp_checksum(rsdp, sizeof(*rsdp));
- if (checksum != 0)
+ if (checksum != 0) {
return -1;
+ }
return 0;
}
@@ -253,20 +259,23 @@ acpimp_get_rsdp(phys_addr_t start, size_t size, struct acpimp_rsdp *rsdp)
assert(size > 0);
assert(P2ALIGNED(size, ACPIMP_RSDP_ALIGN));
- if (!P2ALIGNED(start, ACPIMP_RSDP_ALIGN))
+ if (!P2ALIGNED(start, ACPIMP_RSDP_ALIGN)) {
return -1;
+ }
addr = (unsigned long)vm_kmem_map_pa(start, size, &map_addr, &map_size);
- if (addr == 0)
+ if (addr == 0) {
panic("acpimp: unable to map bios memory in kernel map");
+ }
for (end = addr + size; addr < end; addr += ACPIMP_RSDP_ALIGN) {
src = (const struct acpimp_rsdp *)addr;
error = acpimp_check_rsdp(src);
- if (!error)
+ if (!error) {
break;
+ }
}
if (!(addr < end)) {
@@ -292,8 +301,9 @@ acpimp_find_rsdp(struct acpimp_rsdp *rsdp)
ptr = vm_kmem_map_pa(BIOSMEM_EBDA_PTR, sizeof(*ptr), &map_addr, &map_size);
- if (ptr == NULL)
+ if (ptr == NULL) {
panic("acpimp: unable to map ebda pointer in kernel map");
+ }
base = *((const volatile uint16_t *)ptr);
vm_kmem_unmap_pa(map_addr, map_size);
@@ -302,15 +312,17 @@ acpimp_find_rsdp(struct acpimp_rsdp *rsdp)
base <<= 4;
error = acpimp_get_rsdp(base, 1024, rsdp);
- if (!error)
+ if (!error) {
return 0;
+ }
}
error = acpimp_get_rsdp(BIOSMEM_EXT_ROM, BIOSMEM_END - BIOSMEM_EXT_ROM,
rsdp);
- if (!error)
+ if (!error) {
return 0;
+ }
printk("acpimp: unable to find root system description pointer\n");
return -1;
@@ -338,8 +350,9 @@ acpimp_copy_table(uint32_t addr)
table = vm_kmem_map_pa(addr, sizeof(*table), &map_addr, &map_size);
- if (table == NULL)
+ if (table == NULL) {
panic("acpimp: unable to map acpi data in kernel map");
+ }
if (!acpimp_table_required(table)) {
copy = NULL;
@@ -351,8 +364,9 @@ acpimp_copy_table(uint32_t addr)
table = vm_kmem_map_pa(addr, size, &map_addr, &map_size);
- if (table == NULL)
+ if (table == NULL) {
panic("acpimp: unable to map acpi data in kernel map");
+ }
checksum = acpimp_checksum(table, size);
@@ -367,8 +381,9 @@ acpimp_copy_table(uint32_t addr)
copy = kmem_alloc(size);
- if (copy == NULL)
+ if (copy == NULL) {
panic("acpimp: unable to allocate memory for acpi data copy");
+ }
memcpy(copy, table, size);
@@ -387,8 +402,9 @@ acpimp_copy_tables(const struct acpimp_rsdp *rsdp)
table = acpimp_copy_table(rsdp->rsdt_address);
- if (table == NULL)
+ if (table == NULL) {
return -1;
+ }
acpimp_register_table(table);
@@ -398,16 +414,18 @@ acpimp_copy_tables(const struct acpimp_rsdp *rsdp)
for (addr = rsdt->entries; addr < end; addr++) {
table = acpimp_copy_table(*addr);
- if (table == NULL)
+ if (table == NULL) {
continue;
+ }
acpimp_register_table(table);
}
error = acpimp_check_tables();
- if (error)
+ if (error) {
goto error;
+ }
return 0;
@@ -439,8 +457,9 @@ acpimp_madt_iter_next(struct acpimp_madt_iter *iter)
static void __init
acpimp_load_lapic(const struct acpimp_madt_entry_lapic *lapic, int *is_bsp)
{
- if (!(lapic->flags & ACPIMP_MADT_LAPIC_ENABLED))
+ if (!(lapic->flags & ACPIMP_MADT_LAPIC_ENABLED)) {
return;
+ }
cpu_mp_register_lapic(lapic->apic_id, *is_bsp);
*is_bsp = 0;
@@ -460,12 +479,13 @@ acpimp_load_madt(void)
lapic_setup(madt->lapic_addr);
is_bsp = 1;
- acpimp_madt_foreach(madt, &iter)
+ acpimp_madt_foreach(madt, &iter) {
switch (iter.entry->type) {
case ACPIMP_MADT_ENTRY_LAPIC:
acpimp_load_lapic(&iter.entry->lapic, &is_bsp);
break;
}
+ }
}
int __init
@@ -476,13 +496,15 @@ acpimp_setup(void)
error = acpimp_find_rsdp(&rsdp);
- if (error)
+ if (error) {
return error;
+ }
error = acpimp_copy_tables(&rsdp);
- if (error)
+ if (error) {
return error;
+ }
acpimp_info();
acpimp_load_madt();
diff --git a/arch/x86/machine/biosmem.c b/arch/x86/machine/biosmem.c
index 0db7bfde..dd55b04b 100644
--- a/arch/x86/machine/biosmem.c
+++ b/arch/x86/machine/biosmem.c
@@ -156,7 +156,7 @@ biosmem_register_boot_data(phys_addr_t start, phys_addr_t end, bool temporary)
for (i = 0; i < biosmem_nr_boot_data; i++) {
/* Check if the new range overlaps */
if ((end > biosmem_boot_data_array[i].start)
- && (start < biosmem_boot_data_array[i].end)) {
+ && (start < biosmem_boot_data_array[i].end)) {
/*
* If it does, check whether it's part of another range.
@@ -321,8 +321,9 @@ biosmem_map_sort(void)
tmp = biosmem_map[i];
for (j = i - 1; j < i; j--) {
- if (biosmem_map[j].base_addr < tmp.base_addr)
+ if (biosmem_map[j].base_addr < tmp.base_addr) {
break;
+ }
biosmem_map[j + 1] = biosmem_map[j];
}
@@ -402,19 +403,20 @@ biosmem_map_adjust(void)
continue;
}
- if (tmp.type == a->type)
+ if (tmp.type == a->type) {
first = a;
- else if (tmp.type == b->type)
+ } else if (tmp.type == b->type) {
first = b;
- else {
+ } else {
/*
* If the overlapping area can't be merged with one of its
* neighbors, it must be added as a new entry.
*/
- if (biosmem_map_size >= ARRAY_SIZE(biosmem_map))
+ if (biosmem_map_size >= ARRAY_SIZE(biosmem_map)) {
boot_panic(biosmem_panic_too_big_msg);
+ }
biosmem_map[biosmem_map_size] = tmp;
biosmem_map_size++;
@@ -422,8 +424,9 @@ biosmem_map_adjust(void)
continue;
}
- if (first->base_addr > tmp.base_addr)
+ if (first->base_addr > tmp.base_addr) {
first->base_addr = tmp.base_addr;
+ }
first->length += tmp.length;
j++;
@@ -455,32 +458,38 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
map_end = biosmem_map + biosmem_map_size;
for (entry = biosmem_map; entry < map_end; entry++) {
- if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE) {
continue;
+ }
start = vm_page_round(entry->base_addr);
- if (start >= *phys_end)
+ if (start >= *phys_end) {
break;
+ }
end = vm_page_trunc(entry->base_addr + entry->length);
if ((start < end) && (start < *phys_end) && (end > *phys_start)) {
- if (seg_start == (phys_addr_t)-1)
+ if (seg_start == (phys_addr_t)-1) {
seg_start = start;
+ }
seg_end = end;
}
}
- if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1))
+ if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1)) {
return -1;
+ }
- if (seg_start > *phys_start)
+ if (seg_start > *phys_start) {
*phys_start = seg_start;
+ }
- if (seg_end < *phys_end)
+ if (seg_end < *phys_end) {
*phys_end = seg_end;
+ }
return 0;
}
@@ -594,8 +603,9 @@ biosmem_setup_allocator(const struct multiboot_raw_info *mbi)
end = vm_page_trunc((mbi->mem_upper + 1024) << 10);
#ifndef __LP64__
- if (end > VM_PAGE_DIRECTMAP_LIMIT)
+ if (end > VM_PAGE_DIRECTMAP_LIMIT) {
end = VM_PAGE_DIRECTMAP_LIMIT;
+ }
#endif /* __LP64__ */
max_heap_start = 0;
@@ -617,8 +627,9 @@ biosmem_setup_allocator(const struct multiboot_raw_info *mbi)
start = heap_end;
}
- if (max_heap_start >= max_heap_end)
+ if (max_heap_start >= max_heap_end) {
boot_panic(biosmem_panic_setup_msg);
+ }
biosmem_heap_start = max_heap_start;
biosmem_heap_end = max_heap_end;
@@ -636,10 +647,11 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_addr_t phys_start, phys_end;
int error;
- if (mbi->flags & MULTIBOOT_LOADER_MMAP)
+ if (mbi->flags & MULTIBOOT_LOADER_MMAP) {
biosmem_map_build(mbi);
- else
+ } else {
biosmem_map_build_simple(mbi);
+ }
biosmem_map_adjust();
@@ -647,8 +659,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_DMA_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
boot_panic(biosmem_panic_noseg_msg);
+ }
biosmem_set_segment(VM_PAGE_SEG_DMA, phys_start, phys_end);
@@ -657,8 +670,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_DMA32_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
goto out;
+ }
biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
@@ -667,8 +681,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_DIRECTMAP_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
goto out;
+ }
biosmem_set_segment(VM_PAGE_SEG_DIRECTMAP, phys_start, phys_end);
@@ -676,8 +691,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_HIGHMEM_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
goto out;
+ }
biosmem_set_segment(VM_PAGE_SEG_HIGHMEM, phys_start, phys_end);
@@ -692,8 +708,9 @@ biosmem_bootalloc(unsigned int nr_pages)
size = vm_page_ptoa(nr_pages);
- if (size == 0)
+ if (size == 0) {
boot_panic(biosmem_panic_inval_msg);
+ }
if (biosmem_heap_topdown) {
addr = biosmem_heap_top - size;
@@ -722,12 +739,13 @@ biosmem_bootalloc(unsigned int nr_pages)
phys_addr_t __boot
biosmem_directmap_end(void)
{
- if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0)
+ if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0) {
return biosmem_segment_end(VM_PAGE_SEG_DIRECTMAP);
- else if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0)
+ } else if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0) {
return biosmem_segment_end(VM_PAGE_SEG_DMA32);
- else
+ } else {
return biosmem_segment_end(VM_PAGE_SEG_DMA);
+ }
}
#if DEBUG
@@ -836,8 +854,9 @@ biosmem_setup(void)
: (uint64_t)1 << cpu->phys_addr_width;
for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
- if (biosmem_segment_size(i) == 0)
+ if (biosmem_segment_size(i) == 0) {
break;
+ }
seg = &biosmem_segments[i];
biosmem_load_segment(seg, max_phys_end);
@@ -911,13 +930,15 @@ biosmem_free_usable(void)
for (i = 0; i < biosmem_map_size; i++) {
entry = &biosmem_map[i];
- if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE) {
continue;
+ }
start = vm_page_round(entry->base_addr);
- if (start >= VM_PAGE_HIGHMEM_LIMIT)
+ if (start >= VM_PAGE_HIGHMEM_LIMIT) {
break;
+ }
end = vm_page_trunc(entry->base_addr + entry->length);
@@ -925,8 +946,9 @@ biosmem_free_usable(void)
end = VM_PAGE_HIGHMEM_LIMIT;
}
- if (start < BIOSMEM_BASE)
+ if (start < BIOSMEM_BASE) {
start = BIOSMEM_BASE;
+ }
if (start >= end) {
continue;
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 4b1b24f8..d5359755 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -107,14 +107,16 @@ boot_memmove(void *dest, const void *src, size_t n)
dest_ptr = dest;
src_ptr = src;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr++ = *src_ptr++;
+ }
} else {
dest_ptr = dest + n - 1;
src_ptr = src + n - 1;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr-- = *src_ptr--;
+ }
}
return dest;
@@ -128,8 +130,9 @@ boot_memset(void *s, int c, size_t n)
buffer = s;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
buffer[i] = c;
+ }
return s;
}
@@ -141,8 +144,9 @@ boot_strlen(const char *s)
i = 0;
- while (*s++ != '\0')
+ while (*s++ != '\0') {
i++;
+ }
return i;
}
@@ -158,16 +162,19 @@ boot_panic(const char *msg)
s = boot_panic_intro_msg;
- while ((ptr < end) && (*s != '\0'))
+ while ((ptr < end) && (*s != '\0')) {
*ptr++ = (BOOT_CGACOLOR << 8) | *s++;
+ }
s = msg;
- while ((ptr < end) && (*s != '\0'))
+ while ((ptr < end) && (*s != '\0')) {
*ptr++ = (BOOT_CGACOLOR << 8) | *s++;
+ }
- while (ptr < end)
+ while (ptr < end) {
*ptr++ = (BOOT_CGACOLOR << 8) | ' ';
+ }
cpu_halt();
@@ -180,8 +187,9 @@ boot_save_cmdline_sizes(struct multiboot_raw_info *mbi)
struct multiboot_raw_module *mod;
uint32_t i;
- if (mbi->flags & MULTIBOOT_LOADER_CMDLINE)
+ if (mbi->flags & MULTIBOOT_LOADER_CMDLINE) {
mbi->unused0 = boot_strlen((char *)(unsigned long)mbi->cmdline) + 1;
+ }
if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
unsigned long addr;
@@ -237,8 +245,9 @@ boot_register_data(const struct multiboot_raw_info *mbi)
shdr = (struct elf_shdr *)(tmp + (i * mbi->shdr_size));
if ((shdr->type != ELF_SHT_SYMTAB)
- && (shdr->type != ELF_SHT_STRTAB))
+ && (shdr->type != ELF_SHT_STRTAB)) {
continue;
+ }
biosmem_register_boot_data(shdr->addr, shdr->addr + shdr->size, true);
}
@@ -248,11 +257,13 @@ boot_register_data(const struct multiboot_raw_info *mbi)
pmap_pte_t * __boot
boot_setup_paging(struct multiboot_raw_info *mbi, unsigned long eax)
{
- if (eax != MULTIBOOT_LOADER_MAGIC)
+ if (eax != MULTIBOOT_LOADER_MAGIC) {
boot_panic(boot_panic_loader_msg);
+ }
- if (!(mbi->flags & MULTIBOOT_LOADER_MEMORY))
+ if (!(mbi->flags & MULTIBOOT_LOADER_MEMORY)) {
boot_panic(boot_panic_meminfo_msg);
+ }
/*
* Save the multiboot data passed by the boot loader, initialize the
@@ -260,8 +271,9 @@ boot_setup_paging(struct multiboot_raw_info *mbi, unsigned long eax)
*/
boot_memmove(&boot_raw_mbi, mbi, sizeof(boot_raw_mbi));
- if ((mbi->flags & MULTIBOOT_LOADER_MODULES) && (mbi->mods_count == 0))
+ if ((mbi->flags & MULTIBOOT_LOADER_MODULES) && (mbi->mods_count == 0)) {
boot_raw_mbi.flags &= ~MULTIBOOT_LOADER_MODULES;
+ }
/*
* The kernel and modules command lines will be memory mapped later
@@ -298,13 +310,15 @@ boot_save_memory(uint32_t addr, size_t size)
*/
src = vm_kmem_map_pa(addr, size, &map_addr, &map_size);
- if (src == NULL)
+ if (src == NULL) {
panic("boot: unable to map boot data in kernel map");
+ }
copy = kmem_alloc(size);
- if (copy == NULL)
+ if (copy == NULL) {
panic("boot: unable to allocate memory for boot data copy");
+ }
memcpy(copy, src, size);
vm_kmem_unmap_pa(map_addr, map_size);
@@ -323,13 +337,15 @@ boot_save_mod(struct multiboot_module *dest_mod,
size = src_mod->mod_end - src_mod->mod_start;
src = vm_kmem_map_pa(src_mod->mod_start, size, &map_addr, &map_size);
- if (src == NULL)
+ if (src == NULL) {
panic("boot: unable to map module in kernel map");
+ }
copy = kmem_alloc(size);
- if (copy == NULL)
+ if (copy == NULL) {
panic("boot: unable to allocate memory for module copy");
+ }
memcpy(copy, src, size);
vm_kmem_unmap_pa(map_addr, map_size);
@@ -337,10 +353,11 @@ boot_save_mod(struct multiboot_module *dest_mod,
dest_mod->mod_start = copy;
dest_mod->mod_end = copy + size;
- if (src_mod->string == 0)
+ if (src_mod->string == 0) {
dest_mod->string = NULL;
- else
+ } else {
dest_mod->string = boot_save_memory(src_mod->string, src_mod->reserved);
+ }
}
static void __init
@@ -361,17 +378,20 @@ boot_save_mods(void)
size = boot_raw_mbi.mods_count * sizeof(struct multiboot_raw_module);
src = vm_kmem_map_pa(boot_raw_mbi.mods_addr, size, &map_addr, &map_size);
- if (src == NULL)
+ if (src == NULL) {
panic("boot: unable to map module table in kernel map");
+ }
size = boot_raw_mbi.mods_count * sizeof(struct multiboot_module);
dest = kmem_alloc(size);
- if (dest == NULL)
+ if (dest == NULL) {
panic("boot: unable to allocate memory for the module table");
+ }
- for (i = 0; i < boot_raw_mbi.mods_count; i++)
+ for (i = 0; i < boot_raw_mbi.mods_count; i++) {
boot_save_mod(&dest[i], &src[i]);
+ }
vm_kmem_unmap_pa(map_addr, map_size);
@@ -395,8 +415,9 @@ boot_save_data(void)
if (boot_mbi.flags & MULTIBOOT_LOADER_CMDLINE)
boot_mbi.cmdline = boot_save_memory(boot_raw_mbi.cmdline,
boot_raw_mbi.unused0);
- else
+ else {
boot_mbi.cmdline = NULL;
+ }
boot_save_mods();
strace_setup(&boot_raw_mbi);
diff --git a/arch/x86/machine/cga.c b/arch/x86/machine/cga.c
index 07d4eb5e..1ab23582 100644
--- a/arch/x86/machine/cga.c
+++ b/arch/x86/machine/cga.c
@@ -137,16 +137,17 @@ cga_scroll_lines(void)
CGA_MEMORY_SIZE - (CGA_COLUMNS * 2));
last_line = (uint16_t *)cga_memory + (CGA_COLUMNS * (CGA_LINES - 1));
- for(i = 0; i < CGA_COLUMNS; i++)
+ for(i = 0; i < CGA_COLUMNS; i++) {
last_line[i] = CGA_BLANK;
+ }
}
void
cga_write_byte(uint8_t byte)
{
- if (byte == '\r')
+ if (byte == '\r') {
return;
- else if (byte == '\n') {
+ } else if (byte == '\n') {
cga_cursor += CGA_COLUMNS - cga_get_cursor_column();
if (cga_cursor >= (CGA_LINES * CGA_COLUMNS)) {
@@ -164,8 +165,9 @@ cga_write_byte(uint8_t byte)
} else if (byte == '\t') {
int i;
- for(i = 0; i < CGA_TABULATION_SPACES; i++)
+ for(i = 0; i < CGA_TABULATION_SPACES; i++) {
cga_write_byte(' ');
+ }
} else {
if ((cga_cursor + 1) >= CGA_COLUMNS * CGA_LINES) {
cga_scroll_lines();
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index 54e299dc..0f9ad343 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -242,7 +242,7 @@ cpu_seg_set_tss(char *table, unsigned int selector, struct cpu_tss *tss)
| CPU_DESC_PRESENT | CPU_DESC_TYPE_TSS
| ((base & CPU_DESC_SEG_BASE_MID_MASK) >> 16);
desc->word1 = ((base & CPU_DESC_SEG_BASE_LOW_MASK) << 16)
- | (limit & CPU_DESC_SEG_LIMIT_LOW_MASK);
+ | (limit & CPU_DESC_SEG_LIMIT_LOW_MASK);
}
/*
@@ -438,13 +438,15 @@ cpu_init(struct cpu *cpu)
cpu->type = (eax & CPU_TYPE_MASK) >> CPU_TYPE_SHIFT;
cpu->family = (eax & CPU_FAMILY_MASK) >> CPU_FAMILY_SHIFT;
- if (cpu->family == 0xf)
+ if (cpu->family == 0xf) {
cpu->family += (eax & CPU_EXTFAMILY_MASK) >> CPU_EXTFAMILY_SHIFT;
+ }
cpu->model = (eax & CPU_MODEL_MASK) >> CPU_MODEL_SHIFT;
- if ((cpu->model == 6) || (cpu->model == 0xf))
+ if ((cpu->model == 6) || (cpu->model == 0xf)) {
cpu->model += (eax & CPU_EXTMODEL_MASK) >> CPU_EXTMODEL_SHIFT;
+ }
cpu->stepping = (eax & CPU_STEPPING_MASK) >> CPU_STEPPING_SHIFT;
cpu->clflush_size = ((ebx & CPU_CLFLUSH_MASK) >> CPU_CLFLUSH_SHIFT) * 8;
@@ -455,10 +457,11 @@ cpu_init(struct cpu *cpu)
eax = 0x80000000;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax <= 0x80000000)
+ if (eax <= 0x80000000) {
max_extended = 0;
- else
+ } else {
max_extended = eax;
+ }
if (max_extended < 0x80000001) {
cpu->features3 = 0;
@@ -526,12 +529,14 @@ cpu_panic_on_missing_feature(const char *feature)
void __init
cpu_check(const struct cpu *cpu)
{
- if (!(cpu->features2 & CPU_FEATURE2_FPU))
+ if (!(cpu->features2 & CPU_FEATURE2_FPU)) {
cpu_panic_on_missing_feature("fpu");
+ }
/* TODO: support UP with legacy PIC machines */
- if (!(cpu->features2 & CPU_FEATURE2_APIC))
+ if (!(cpu->features2 & CPU_FEATURE2_APIC)) {
cpu_panic_on_missing_feature("apic");
+ }
}
void
@@ -541,8 +546,9 @@ cpu_info(const struct cpu *cpu)
cpu->id, cpu->vendor_id, cpu->type, cpu->family, cpu->model,
cpu->stepping);
- if (strlen(cpu->model_name) > 0)
+ if (strlen(cpu->model_name) > 0) {
printk("cpu%u: %s\n", cpu->id, cpu->model_name);
+ }
if ((cpu->phys_addr_width != 0) && (cpu->virt_addr_width != 0))
printk("cpu%u: address widths: physical: %hu, virtual: %hu\n",
@@ -558,8 +564,9 @@ cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
if (is_bsp) {
cpu = percpu_ptr(cpu_desc, 0);
- if (cpu->apic_id != CPU_INVALID_APIC_ID)
+ if (cpu->apic_id != CPU_INVALID_APIC_ID) {
panic("cpu: another processor pretends to be the BSP");
+ }
cpu->apic_id = apic_id;
return;
@@ -567,8 +574,9 @@ cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
error = percpu_add(cpu_nr_active);
- if (error)
+ if (error) {
return;
+ }
cpu = percpu_ptr(cpu_desc, cpu_nr_active);
cpu_preinit(cpu, cpu_nr_active, apic_id);
@@ -583,8 +591,9 @@ cpu_mp_probe(void)
error = acpimp_setup();
/* TODO Support UP with legacy PIC */
- if (error)
+ if (error) {
panic("cpu: ACPI required to initialize local APIC");
+ }
printk("cpu: %u processor(s) configured\n", cpu_count());
}
@@ -625,15 +634,17 @@ cpu_mp_setup(void)
page = vm_page_alloc(vm_page_order(STACK_SIZE), VM_PAGE_SEL_DIRECTMAP,
VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
panic("cpu: unable to allocate boot stack for cpu%u", i);
+ }
cpu->boot_stack = vm_page_direct_ptr(page);
page = vm_page_alloc(vm_page_order(STACK_SIZE), VM_PAGE_SEL_DIRECTMAP,
VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
panic("cpu: unable to allocate double fault stack for cpu%u", i);
+ }
cpu->double_fault_stack = vm_page_direct_ptr(page);
}
@@ -658,8 +669,9 @@ cpu_mp_setup(void)
lapic_ipi_startup(cpu->apic_id, BOOT_MP_TRAMPOLINE_ADDR >> 12);
cpu_delay(200);
- while (cpu->state == CPU_STATE_OFF)
+ while (cpu->state == CPU_STATE_OFF) {
cpu_pause();
+ }
}
}
@@ -683,8 +695,9 @@ cpu_halt_broadcast(void)
nr_cpus = cpu_count();
- if (nr_cpus == 1)
+ if (nr_cpus == 1) {
return;
+ }
lapic_ipi_broadcast(TRAP_CPU_HALT);
}
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index c4187d32..a18712c8 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -337,8 +337,9 @@ cpu_halt(void)
{
cpu_intr_disable();
- for (;;)
+ for (;;) {
asm volatile("hlt" : : : "memory");
+ }
}
/*
@@ -495,16 +496,16 @@ cpu_tlb_flush(void)
static __always_inline void
cpu_tlb_flush_all(void)
{
- if (!cpu_has_global_pages())
+ if (!cpu_has_global_pages()) {
cpu_tlb_flush();
- else {
+ } else {
unsigned long cr4;
cr4 = cpu_get_cr4();
- if (!(cr4 & CPU_CR4_PGE))
+ if (!(cr4 & CPU_CR4_PGE)) {
cpu_tlb_flush();
- else {
+ } else {
cr4 &= ~CPU_CR4_PGE;
cpu_set_cr4(cr4);
cr4 |= CPU_CR4_PGE;
diff --git a/arch/x86/machine/lapic.c b/arch/x86/machine/lapic.c
index 994aef62..26b0dd50 100644
--- a/arch/x86/machine/lapic.c
+++ b/arch/x86/machine/lapic.c
@@ -247,13 +247,15 @@ lapic_setup(uint32_t map_addr)
lapic_map = vm_kmem_map_pa(map_addr, sizeof(*lapic_map), NULL, NULL);
- if (lapic_map == NULL)
+ if (lapic_map == NULL) {
panic("lapic: unable to map registers in kernel map");
+ }
value = lapic_read(&lapic_map->version);
- if ((value & LAPIC_VERSION_MASK) != LAPIC_VERSION_MASK)
+ if ((value & LAPIC_VERSION_MASK) != LAPIC_VERSION_MASK) {
panic("lapic: external local APIC not supported");
+ }
lapic_setup_registers();
lapic_setup_timer();
@@ -268,8 +270,9 @@ lapic_ap_setup(void)
static void
lapic_ipi(uint32_t apic_id, uint32_t icr)
{
- if ((icr & LAPIC_ICR_DEST_MASK) == 0)
+ if ((icr & LAPIC_ICR_DEST_MASK) == 0) {
lapic_write(&lapic_map->icr_high, apic_id << LAPIC_DEST_SHIFT);
+ }
lapic_write(&lapic_map->icr_low, icr & ~LAPIC_ICR_RESERVED);
}
diff --git a/arch/x86/machine/pic.c b/arch/x86/machine/pic.c
index e0ed1e31..440ca3a7 100644
--- a/arch/x86/machine/pic.c
+++ b/arch/x86/machine/pic.c
@@ -75,8 +75,9 @@ pic_setup(void)
static void
pic_eoi(unsigned long intr)
{
- if (intr >= PIC_NR_INTRS)
+ if (intr >= PIC_NR_INTRS) {
io_write_byte(PIC_SLAVE_CMD, PIC_EOI);
+ }
io_write_byte(PIC_MASTER_CMD, PIC_EOI);
}
@@ -101,13 +102,15 @@ pic_spurious_intr(struct trap_frame *frame)
if (intr == PIC_SPURIOUS_INTR) {
isr = pic_read_isr(PIC_MASTER_CMD);
- if (isr & (1 << PIC_SPURIOUS_INTR))
+ if (isr & (1 << PIC_SPURIOUS_INTR)) {
panic("pic: real interrupt %lu", intr);
+ }
} else {
isr = pic_read_isr(PIC_SLAVE_CMD);
- if (isr & (1 << PIC_SPURIOUS_INTR))
+ if (isr & (1 << PIC_SPURIOUS_INTR)) {
panic("pic: real interrupt %lu", intr);
+ }
pic_eoi(PIC_SLAVE_INTR);
}
diff --git a/arch/x86/machine/pit.c b/arch/x86/machine/pit.c
index ea7a1a58..d3fece51 100644
--- a/arch/x86/machine/pit.c
+++ b/arch/x86/machine/pit.c
@@ -80,8 +80,9 @@ pit_delay(unsigned long usecs)
diff = prev - count;
prev = count;
- if (diff < 0)
+ if (diff < 0) {
diff += PIT_MAX_COUNT;
+ }
total -= diff;
} while (total > 0);
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index aef17410..193c1aa5 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -300,8 +300,9 @@ pmap_boot_enter(pmap_pte_t *root_ptp, unsigned long va, phys_addr_t pa,
pmap_pte_t *pt, *ptp, *pte, bits;
unsigned int level, last_level;
- if (pa != (pa & PMAP_PA_MASK))
+ if (pa != (pa & PMAP_PA_MASK)) {
boot_panic(pmap_panic_inval_msg);
+ }
switch (pgsize) {
#ifdef __LP64__
@@ -326,9 +327,9 @@ pmap_boot_enter(pmap_pte_t *root_ptp, unsigned long va, phys_addr_t pa,
pt_level = &pt_levels[level];
pte = &pt[pmap_pte_index(va, pt_level)];
- if (*pte != 0)
+ if (*pte != 0) {
ptp = (void *)(unsigned long)(*pte & PMAP_PA_MASK);
- else {
+ } else {
ptp = biosmem_bootalloc(1);
*pte = ((unsigned long)ptp | PMAP_PTE_RW | PMAP_PTE_P)
& pt_level->mask;
@@ -351,14 +352,16 @@ pmap_boot_get_pgsize(void)
eax = 0x80000000;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax <= 0x80000000)
+ if (eax <= 0x80000000) {
goto out;
+ }
eax = 0x80000001;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (edx & CPU_FEATURE4_1GP)
+ if (edx & CPU_FEATURE4_1GP) {
return (1 << PMAP_L2_SKIP);
+ }
out:
return (1 << PMAP_L1_SKIP);
@@ -366,20 +369,23 @@ out:
eax = 0;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax == 0)
+ if (eax == 0) {
goto out;
+ }
eax = 1;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
#ifdef X86_PAE
- if (!(edx & CPU_FEATURE2_PAE))
+ if (!(edx & CPU_FEATURE2_PAE)) {
boot_panic(pmap_panic_no_pae);
+ }
return (1 << PMAP_L1_SKIP);
#else /* X86_PAE */
- if (edx & CPU_FEATURE2_PSE)
+ if (edx & CPU_FEATURE2_PSE) {
return (1 << PMAP_L1_SKIP);
+ }
#endif /* X86_PAE */
out:
@@ -393,8 +399,9 @@ out:
static void __boot
pmap_boot_enable_pgext(unsigned long pgsize)
{
- if (pgsize == PAGE_SIZE)
+ if (pgsize == PAGE_SIZE) {
return;
+ }
/*
* On 64-bits systems, PAE is already enabled.
@@ -441,8 +448,9 @@ pmap_setup_paging(void)
directmap_end = biosmem_directmap_end();
- if (directmap_end > (VM_MAX_DIRECTMAP_ADDRESS - VM_MIN_DIRECTMAP_ADDRESS))
+ if (directmap_end > (VM_MAX_DIRECTMAP_ADDRESS - VM_MIN_DIRECTMAP_ADDRESS)) {
boot_panic(pmap_panic_directmap_msg);
+ }
va = VM_MIN_DIRECTMAP_ADDRESS;
pa = 0;
@@ -478,8 +486,9 @@ pmap_setup_paging(void)
cpu_table->pdpt_pa = BOOT_VTOP((unsigned long)pmap_cpu_kpdpts[0]);
root_ptp = (void *)cpu_table->pdpt_pa;
- for (i = 0; i < PMAP_NR_RPTPS; i++)
+ for (i = 0; i < PMAP_NR_RPTPS; i++) {
root_ptp[i] = (cpu_table->root_ptp_pa + (i * PAGE_SIZE)) | PMAP_PTE_P;
+ }
#endif /* X86_PAE */
return root_ptp;
@@ -597,8 +606,9 @@ pmap_walk_vas(unsigned long start, unsigned long end, pmap_walk_fn_t walk_fn)
do {
#ifdef __LP64__
/* Handle long mode canonical form */
- if (va == VM_MAX_ADDRESS)
+ if (va == VM_MAX_ADDRESS) {
va = VM_MIN_KERNEL_ADDRESS;
+ }
#endif /* __LP64__ */
level = PMAP_NR_LEVELS - 1;
@@ -610,13 +620,15 @@ pmap_walk_vas(unsigned long start, unsigned long end, pmap_walk_fn_t walk_fn)
index = pmap_pte_index(va, pt_level);
pte = &ptp[index];
- if (!pmap_pte_valid(*pte))
+ if (!pmap_pte_valid(*pte)) {
break;
+ }
walk_fn(ptp_pa, index, level);
- if ((level == 0) || pmap_pte_large(*pte))
+ if ((level == 0) || pmap_pte_large(*pte)) {
break;
+ }
level--;
ptp_pa = *pte & PMAP_PA_MASK;
@@ -635,8 +647,9 @@ pmap_setup_global_page(phys_addr_t ptp_pa, unsigned int index,
pte = &pmap_ptp_from_pa(ptp_pa)[index];
- if ((level == 0) || pmap_pte_large(*pte))
+ if ((level == 0) || pmap_pte_large(*pte)) {
*pte |= PMAP_PTE_G;
+ }
}
static void __init
@@ -666,8 +679,9 @@ pmap_update_oplist_create(struct pmap_update_oplist **oplistp)
oplist = kmem_cache_alloc(&pmap_update_oplist_cache);
- if (oplist == NULL)
+ if (oplist == NULL) {
return ERROR_NOMEM;
+ }
*oplistp = oplist;
return 0;
@@ -697,8 +711,9 @@ pmap_update_oplist_prepare(struct pmap_update_oplist *oplist,
struct pmap *pmap)
{
if (oplist->pmap != pmap) {
- if (oplist->pmap != NULL)
+ if (oplist->pmap != NULL) {
pmap_update(oplist->pmap);
+ }
oplist->pmap = pmap;
} else if (oplist->nr_ops == ARRAY_SIZE(oplist->ops)) {
@@ -710,8 +725,9 @@ pmap_update_oplist_prepare(struct pmap_update_oplist *oplist,
static struct pmap_update_op *
pmap_update_oplist_prev_op(struct pmap_update_oplist *oplist)
{
- if (oplist->nr_ops == 0)
+ if (oplist->nr_ops == 0) {
return NULL;
+ }
return &oplist->ops[oplist->nr_ops - 1];
}
@@ -746,8 +762,9 @@ pmap_update_oplist_count_mappings(const struct pmap_update_oplist *oplist,
for (i = 0; i < oplist->nr_ops; i++) {
op = &oplist->ops[i];
- if (!cpumap_test(&op->cpumap, cpu))
+ if (!cpumap_test(&op->cpumap, cpu)) {
continue;
+ }
switch (op->operation) {
case PMAP_UPDATE_OP_ENTER:
@@ -857,8 +874,9 @@ pmap_bootstrap(void)
cpumap_zero(&pmap_booter_cpumap);
cpumap_set(&pmap_booter_cpumap, 0);
- if (cpu_has_global_pages())
+ if (cpu_has_global_pages()) {
pmap_setup_global_pages();
+ }
}
void __init
@@ -866,10 +884,11 @@ pmap_ap_bootstrap(void)
{
cpu_local_assign(pmap_current_ptr, kernel_pmap);
- if (cpu_has_global_pages())
+ if (cpu_has_global_pages()) {
cpu_enable_global_pages();
- else
+ } else {
cpu_tlb_flush();
+ }
}
static void __init
@@ -880,8 +899,9 @@ pmap_setup_set_ptp_type(phys_addr_t ptp_pa, unsigned int index,
(void)index;
- if (level == 0)
+ if (level == 0) {
return;
+ }
page = vm_page_lookup(ptp_pa);
assert(page != NULL);
@@ -943,13 +963,14 @@ pmap_copy_cpu_table_recursive(const pmap_pte_t *sptp, unsigned int level,
i++, va = P2END(va, 1UL << pt_level->skip)) {
#ifdef __LP64__
/* Handle long mode canonical form */
- if (va == VM_MAX_ADDRESS)
+ if (va == VM_MAX_ADDRESS) {
va = VM_MIN_KERNEL_ADDRESS;
+ }
#endif /* __LP64__ */
- if (!pmap_pte_valid(sptp[i]))
+ if (!pmap_pte_valid(sptp[i])) {
continue;
- else if (pmap_pte_large(sptp[i])) {
+ } else if (pmap_pte_large(sptp[i])) {
dptp[i] = sptp[i];
continue;
}
@@ -959,9 +980,9 @@ pmap_copy_cpu_table_recursive(const pmap_pte_t *sptp, unsigned int level,
pa = vm_page_to_pa(page);
dptp[i] = (sptp[i] & ~PMAP_PA_MASK) | (pa & PMAP_PA_MASK);
- if (((level - 1) == 0) || pmap_pte_large(sptp[i]))
+ if (((level - 1) == 0) || pmap_pte_large(sptp[i])) {
pmap_copy_cpu_table_page(pmap_pte_next(sptp[i]), level - 1, page);
- else
+ } else
pmap_copy_cpu_table_recursive(pmap_pte_next(sptp[i]),
level - 1, page, va);
}
@@ -989,8 +1010,9 @@ pmap_copy_cpu_table(unsigned int cpu)
cpu_table->pdpt = pmap_cpu_kpdpts[cpu];
cpu_table->pdpt_pa = BOOT_VTOP((unsigned long)pmap_cpu_kpdpts[cpu]);
- for (i = 0; i < PMAP_NR_RPTPS; i++)
+ for (i = 0; i < PMAP_NR_RPTPS; i++) {
cpu_table->pdpt[i] = (cpu_table->root_ptp_pa + (i * PAGE_SIZE)) | PMAP_PTE_P;
+ }
#endif /* X86_PAE */
}
@@ -1007,8 +1029,9 @@ pmap_mp_setup(void)
error = cpumap_create(&cpumap);
- if (error)
+ if (error) {
panic("pmap: unable to create syncer cpumap");
+ }
for (cpu = 1; cpu < cpu_count(); cpu++) {
pmap_update_request_array_init(percpu_ptr(pmap_update_request_array,
@@ -1027,8 +1050,9 @@ pmap_mp_setup(void)
thread_attr_set_priority(&attr, THREAD_SCHED_RT_PRIO_MIN);
error = thread_create(&syncer->thread, &attr, pmap_sync, syncer);
- if (error)
+ if (error) {
panic("pmap: unable to create syncer thread");
+ }
oplist = thread_tsd_get(syncer->thread, pmap_oplist_tsd_key);
thread_tsd_set(syncer->thread, pmap_oplist_tsd_key, NULL);
@@ -1037,8 +1061,9 @@ pmap_mp_setup(void)
cpumap_destroy(cpumap);
- for (cpu = 1; cpu < cpu_count(); cpu++)
+ for (cpu = 1; cpu < cpu_count(); cpu++) {
pmap_copy_cpu_table(cpu);
+ }
pmap_do_remote_updates = 1;
}
@@ -1051,8 +1076,9 @@ pmap_thread_init(struct thread *thread)
error = pmap_update_oplist_create(&oplist);
- if (error)
+ if (error) {
return error;
+ }
thread_tsd_set(thread, pmap_oplist_tsd_key, oplist);
return 0;
@@ -1072,11 +1098,13 @@ pmap_kextract(unsigned long va, phys_addr_t *pap)
pt_level = &pmap_pt_levels[level];
pte = &ptp[pmap_pte_index(va, pt_level)];
- if (!pmap_pte_valid(*pte))
+ if (!pmap_pte_valid(*pte)) {
return ERROR_FAULT;
+ }
- if ((level == 0) || pmap_pte_large(*pte))
+ if ((level == 0) || pmap_pte_large(*pte)) {
break;
+ }
level--;
ptp = pmap_pte_next(*pte);
@@ -1094,11 +1122,13 @@ pmap_create(struct pmap **pmapp)
pmap = kmem_cache_alloc(&pmap_cache);
- if (pmap == NULL)
+ if (pmap == NULL) {
return ERROR_NOMEM;
+ }
- for (i = 0; i < ARRAY_SIZE(pmap->cpu_tables); i++)
+ for (i = 0; i < ARRAY_SIZE(pmap->cpu_tables); i++) {
pmap->cpu_tables[i] = NULL;
+ }
*pmapp = pmap;
return 0;
@@ -1119,8 +1149,9 @@ pmap_enter_local(struct pmap *pmap, unsigned long va, phys_addr_t pa,
pte_bits = PMAP_PTE_RW;
- if (pmap != kernel_pmap)
+ if (pmap != kernel_pmap) {
pte_bits |= PMAP_PTE_US;
+ }
level = PMAP_NR_LEVELS - 1;
ptp = pmap_ptp_from_pa(pmap->cpu_tables[cpu_id()]->root_ptp_pa);
@@ -1129,12 +1160,13 @@ pmap_enter_local(struct pmap *pmap, unsigned long va, phys_addr_t pa,
pt_level = &pmap_pt_levels[level];
pte = &ptp[pmap_pte_index(va, pt_level)];
- if (level == 0)
+ if (level == 0) {
break;
+ }
- if (pmap_pte_valid(*pte))
+ if (pmap_pte_valid(*pte)) {
ptp = pmap_pte_next(*pte);
- else {
+ } else {
page = vm_page_alloc(0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_PMAP);
assert(page != NULL);
ptp_pa = vm_page_to_pa(page);
@@ -1166,9 +1198,9 @@ pmap_enter(struct pmap *pmap, unsigned long va, phys_addr_t pa,
pmap_update_oplist_prepare(oplist, pmap);
op = pmap_update_oplist_prepare_op(oplist);
- if (flags & PMAP_PEF_GLOBAL)
+ if (flags & PMAP_PEF_GLOBAL) {
cpumap_copy(&op->cpumap, cpumap_all());
- else {
+ } else {
cpumap_zero(&op->cpumap);
cpumap_set(&op->cpumap, cpu_id());
}
@@ -1195,8 +1227,9 @@ pmap_remove_local_single(struct pmap *pmap, unsigned long va)
pt_level = &pmap_pt_levels[level];
pte = &ptp[pmap_pte_index(va, pt_level)];
- if (level == 0)
+ if (level == 0) {
break;
+ }
level--;
ptp = pmap_pte_next(*pte);
@@ -1295,8 +1328,9 @@ pmap_protect(struct pmap *pmap, unsigned long va, int prot,
static void
pmap_flush_tlb(struct pmap *pmap, unsigned long start, unsigned long end)
{
- if ((pmap != pmap_current()) && (pmap != kernel_pmap))
+ if ((pmap != pmap_current()) && (pmap != kernel_pmap)) {
return;
+ }
while (start < end) {
cpu_tlb_flush_va(start);
@@ -1307,13 +1341,15 @@ pmap_flush_tlb(struct pmap *pmap, unsigned long start, unsigned long end)
static void
pmap_flush_tlb_all(struct pmap *pmap)
{
- if ((pmap != pmap_current()) && (pmap != kernel_pmap))
+ if ((pmap != pmap_current()) && (pmap != kernel_pmap)) {
return;
+ }
- if (pmap == kernel_pmap)
+ if (pmap == kernel_pmap) {
cpu_tlb_flush_all();
- else
+ } else {
cpu_tlb_flush();
+ }
}
static void
@@ -1322,8 +1358,9 @@ pmap_update_enter(struct pmap *pmap, int flush,
{
pmap_enter_local(pmap, args->va, args->pa, args->prot, args->flags);
- if (flush)
+ if (flush) {
pmap_flush_tlb(pmap, args->va, args->va + PAGE_SIZE);
+ }
}
static void
@@ -1332,8 +1369,9 @@ pmap_update_remove(struct pmap *pmap, int flush,
{
pmap_remove_local(pmap, args->start, args->end);
- if (flush)
+ if (flush) {
pmap_flush_tlb(pmap, args->start, args->end);
+ }
}
static void
@@ -1342,8 +1380,9 @@ pmap_update_protect(struct pmap *pmap, int flush,
{
pmap_protect_local(pmap, args->start, args->end, args->prot);
- if (flush)
+ if (flush) {
pmap_flush_tlb(pmap, args->start, args->end);
+ }
}
static void
@@ -1362,8 +1401,9 @@ pmap_update_local(const struct pmap_update_oplist *oplist,
for (i = 0; i < oplist->nr_ops; i++) {
op = &oplist->ops[i];
- if (!cpumap_test(&op->cpumap, cpu_id()))
+ if (!cpumap_test(&op->cpumap, cpu_id())) {
continue;
+ }
switch (op->operation) {
case PMAP_UPDATE_OP_ENTER:
@@ -1386,8 +1426,9 @@ pmap_update_local(const struct pmap_update_oplist *oplist,
}
}
- if (global_tlb_flush)
+ if (global_tlb_flush) {
pmap_flush_tlb_all(oplist->pmap);
+ }
}
void
@@ -1403,8 +1444,9 @@ pmap_update(struct pmap *pmap)
oplist = pmap_update_oplist_get();
- if (pmap != oplist->pmap)
+ if (pmap != oplist->pmap) {
return;
+ }
assert(oplist->nr_ops != 0);
@@ -1435,8 +1477,9 @@ pmap_update(struct pmap *pmap)
mutex_lock(&request->lock);
- while (!request->done)
+ while (!request->done) {
condition_wait(&request->cond, &request->lock);
+ }
mutex_unlock(&request->lock);
}
@@ -1462,8 +1505,9 @@ pmap_sync(void *arg)
for (;;) {
mutex_lock(&queue->lock);
- while (list_empty(&queue->requests))
+ while (list_empty(&queue->requests)) {
condition_wait(&queue->cond, &queue->lock);
+ }
request = list_first_entry(&queue->requests,
struct pmap_update_request, node);
@@ -1488,8 +1532,9 @@ pmap_load(struct pmap *pmap)
assert(!cpu_intr_enabled());
assert(!thread_preempt_enabled());
- if (pmap_current() == pmap)
+ if (pmap_current() == pmap) {
return;
+ }
/* TODO Lazy TLB invalidation */
diff --git a/arch/x86/machine/strace.c b/arch/x86/machine/strace.c
index 6058c561..42af6b20 100644
--- a/arch/x86/machine/strace.c
+++ b/arch/x86/machine/strace.c
@@ -46,15 +46,18 @@ strace_lookup(unsigned long addr, unsigned long *offset, unsigned long *size)
for (sym = strace_symtab; sym < strace_symtab_end; sym++) {
if ((sym->size != 0)
&& (addr >= sym->value)
- && (addr <= (sym->value + sym->size)))
+ && (addr <= (sym->value + sym->size))) {
break;
+ }
}
- if (sym >= strace_symtab_end)
+ if (sym >= strace_symtab_end) {
return NULL;
+ }
- if (sym->name == 0)
+ if (sym->name == 0) {
return NULL;
+ }
*offset = addr - sym->value;
*size = sym->size;
@@ -69,9 +72,9 @@ strace_show_one(unsigned int index, unsigned long ip)
name = strace_lookup(ip, &offset, &size);
- if (name == NULL)
+ if (name == NULL) {
printk("strace: #%u [" STRACE_ADDR_FORMAT "]\n", index, ip);
- else
+ } else
printk("strace: #%u [" STRACE_ADDR_FORMAT "] %s+%#lx/%#lx\n",
index, ip, name, offset, size);
}
@@ -91,8 +94,9 @@ strace_show(unsigned long ip, unsigned long bp)
frame = (void **)bp;
for (;;) {
- if (frame == NULL)
+ if (frame == NULL) {
break;
+ }
error = pmap_kextract((unsigned long)&frame[1], &pa);
@@ -160,8 +164,9 @@ strace_lookup_section(const struct multiboot_raw_info *mbi, const void *table,
shdr = table + (i * mbi->shdr_size);
shdr_name = &shstrtab[shdr->name];
- if (strcmp(shdr_name, name) == 0)
+ if (strcmp(shdr_name, name) == 0) {
return shdr;
+ }
}
return NULL;
@@ -176,8 +181,9 @@ strace_setup(const struct multiboot_raw_info *mbi)
const char *shstrtab;
const void *table;
- if (!(mbi->flags & MULTIBOOT_LOADER_SHDR) || (mbi->shdr_num == 0))
+ if (!(mbi->flags & MULTIBOOT_LOADER_SHDR) || (mbi->shdr_num == 0)) {
goto no_syms;
+ }
size = mbi->shdr_num * mbi->shdr_size;
table = vm_kmem_map_pa(mbi->shdr_addr, size, &map_addr, &map_size);
@@ -217,14 +223,16 @@ strace_setup(const struct multiboot_raw_info *mbi)
strace_symtab = strace_copy_section(symtab_hdr);
- if (strace_symtab == NULL)
+ if (strace_symtab == NULL) {
goto error_symtab;
+ }
strace_symtab_end = (void *)strace_symtab + symtab_hdr->size;
strace_strtab = strace_copy_section(strtab_hdr);
- if (strace_strtab == NULL)
+ if (strace_strtab == NULL) {
goto error_strtab;
+ }
vm_kmem_unmap_pa(shstrtab_map_addr, shstrtab_map_size);
vm_kmem_unmap_pa(map_addr, map_size);
diff --git a/arch/x86/machine/string.c b/arch/x86/machine/string.c
index e78c41cd..26ce941b 100644
--- a/arch/x86/machine/string.c
+++ b/arch/x86/machine/string.c
@@ -78,8 +78,9 @@ memcmp(const void *s1, const void *s2, size_t n)
{
unsigned char c1, c2;
- if (n == 0)
+ if (n == 0) {
return 0;
+ }
asm volatile("repe cmpsb"
: "+D" (s1), "+S" (s2), "+c" (n)
diff --git a/arch/x86/machine/tcb.c b/arch/x86/machine/tcb.c
index a154be11..df1b222c 100644
--- a/arch/x86/machine/tcb.c
+++ b/arch/x86/machine/tcb.c
@@ -40,8 +40,9 @@ tcb_init(struct tcb *tcb, void *stack, void (*fn)(void))
error = pmap_thread_init(thread_from_tcb(tcb));
- if (error)
+ if (error) {
return error;
+ }
tcb->bp = 0;
tcb->sp = (unsigned long)stack + STACK_SIZE - sizeof(unsigned long);
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 15d850c2..49198123 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -171,8 +171,9 @@ trap_setup(void)
{
size_t i;
- for (i = 0; i < CPU_IDT_SIZE; i++)
+ for (i = 0; i < CPU_IDT_SIZE; i++) {
trap_install(i, TRAP_HF_NOPREEMPT, trap_isr_default, trap_default);
+ }
/* Architecture defined traps */
trap_install(TRAP_DE, 0, trap_isr_divide_error, trap_default);
@@ -233,13 +234,15 @@ trap_main(struct trap_frame *frame)
* latter (usually device interrupts), disable preemption to make sure
* there won't be any context switch while handling them.
*/
- if (handler->flags & TRAP_HF_NOPREEMPT)
+ if (handler->flags & TRAP_HF_NOPREEMPT) {
thread_preempt_disable();
+ }
handler->fn(frame);
- if (handler->flags & TRAP_HF_NOPREEMPT)
+ if (handler->flags & TRAP_HF_NOPREEMPT) {
thread_preempt_enable_no_resched();
+ }
thread_schedule();
}
diff --git a/kern/bitmap.c b/kern/bitmap.c
index af19805f..b3117dfb 100644
--- a/kern/bitmap.c
+++ b/kern/bitmap.c
@@ -31,8 +31,9 @@ bitmap_cmp(const unsigned long *a, const unsigned long *b, int nr_bits)
if (n != 0) {
rv = memcmp(a, b, n * sizeof(unsigned long));
- if (rv != 0)
+ if (rv != 0) {
return rv;
+ }
nr_bits -= n * LONG_BIT;
}
@@ -46,19 +47,21 @@ bitmap_cmp(const unsigned long *a, const unsigned long *b, int nr_bits)
last_b &= mask;
}
- if (last_a == last_b)
+ if (last_a == last_b) {
return 0;
- else if (last_a < last_b)
+ } else if (last_a < last_b) {
return -1;
- else
+ } else {
return 1;
+ }
}
static inline unsigned long
bitmap_find_next_compute_complement(unsigned long word, int nr_bits)
{
- if (nr_bits < LONG_BIT)
+ if (nr_bits < LONG_BIT) {
word |= (((unsigned long)-1) << nr_bits);
+ }
return ~word;
}
@@ -80,27 +83,32 @@ bitmap_find_next_bit(const unsigned long *bm, int nr_bits, int bit,
word = *bm;
- if (complement)
+ if (complement) {
word = bitmap_find_next_compute_complement(word, nr_bits);
+ }
- if (bit < LONG_BIT)
+ if (bit < LONG_BIT) {
word &= ~(bitmap_mask(bit) - 1);
+ }
for (;;) {
bit = __builtin_ffsl(word);
- if (bit != 0)
+ if (bit != 0) {
return ((bm - start) * LONG_BIT) + bit - 1;
+ }
bm++;
- if (bm >= end)
+ if (bm >= end) {
return -1;
+ }
nr_bits -= LONG_BIT;
word = *bm;
- if (complement)
+ if (complement) {
word = bitmap_find_next_compute_complement(word, nr_bits);
+ }
}
}
diff --git a/kern/bitmap.h b/kern/bitmap.h
index 8c5980a1..6489b486 100644
--- a/kern/bitmap.h
+++ b/kern/bitmap.h
@@ -63,8 +63,9 @@ bitmap_copy(unsigned long *dest, const unsigned long *src, int nr_bits)
static inline void
bitmap_set(unsigned long *bm, int bit)
{
- if (bit >= LONG_BIT)
+ if (bit >= LONG_BIT) {
bitmap_lookup(bm, bit);
+ }
*bm |= bitmap_mask(bit);
}
@@ -72,8 +73,9 @@ bitmap_set(unsigned long *bm, int bit)
static inline void
bitmap_set_atomic(unsigned long *bm, int bit)
{
- if (bit >= LONG_BIT)
+ if (bit >= LONG_BIT) {
bitmap_lookup(bm, bit);
+ }
atomic_or_ulong(bm, bitmap_mask(bit));
}
@@ -81,8 +83,9 @@ bitmap_set_atomic(unsigned long *bm, int bit)
static inline void
bitmap_clear(unsigned long *bm, int bit)
{
- if (bit >= LONG_BIT)
+ if (bit >= LONG_BIT) {
bitmap_lookup(bm, bit);
+ }
*bm &= ~bitmap_mask(bit);
}
@@ -90,8 +93,9 @@ bitmap_clear(unsigned long *bm, int bit)
static inline void
bitmap_clear_atomic(unsigned long *bm, int bit)
{
- if (bit >= LONG_BIT)
+ if (bit >= LONG_BIT) {
bitmap_lookup(bm, bit);
+ }
atomic_and_ulong(bm, ~bitmap_mask(bit));
}
@@ -99,8 +103,9 @@ bitmap_clear_atomic(unsigned long *bm, int bit)
static inline int
bitmap_test(const unsigned long *bm, int bit)
{
- if (bit >= LONG_BIT)
+ if (bit >= LONG_BIT) {
bitmap_lookup(bm, bit);
+ }
return ((*bm & bitmap_mask(bit)) != 0);
}
@@ -112,8 +117,9 @@ bitmap_and(unsigned long *a, const unsigned long *b, int nr_bits)
n = BITMAP_LONGS(nr_bits);
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
a[i] &= b[i];
+ }
}
static inline void
@@ -123,8 +129,9 @@ bitmap_or(unsigned long *a, const unsigned long *b, int nr_bits)
n = BITMAP_LONGS(nr_bits);
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
a[i] |= b[i];
+ }
}
static inline void
@@ -134,8 +141,9 @@ bitmap_xor(unsigned long *a, const unsigned long *b, int nr_bits)
n = BITMAP_LONGS(nr_bits);
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
a[i] ^= b[i];
+ }
}
static inline int
diff --git a/kern/condition.c b/kern/condition.c
index 0012aa18..2c233df0 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -44,8 +44,9 @@ condition_wait(struct condition *condition, struct mutex *mutex)
assert((condition->mutex == NULL) || (condition->mutex == mutex));
- if (condition->mutex == NULL)
+ if (condition->mutex == NULL) {
condition->mutex = mutex;
+ }
list_insert_tail(&condition->waiters, &waiter.node);
@@ -53,8 +54,9 @@ condition_wait(struct condition *condition, struct mutex *mutex)
state = mutex_release(mutex);
- if (state == MUTEX_CONTENDED)
+ if (state == MUTEX_CONTENDED) {
mutex_signal(mutex);
+ }
spinlock_unlock(&condition->lock);
@@ -82,8 +84,9 @@ condition_signal(struct condition *condition)
waiter = list_first_entry(&condition->waiters, struct mutex_waiter, node);
list_remove(&waiter->node);
- if (list_empty(&condition->waiters))
+ if (list_empty(&condition->waiters)) {
condition->mutex = NULL;
+ }
spinlock_unlock(&condition->lock);
diff --git a/kern/cpumap.c b/kern/cpumap.c
index ccec0d22..2cc3511c 100644
--- a/kern/cpumap.c
+++ b/kern/cpumap.c
@@ -36,8 +36,9 @@ cpumap_setup(void)
cpumap_zero(&cpumap_active_cpus);
nr_cpus = cpu_count();
- for (i = 0; i < nr_cpus; i++)
+ for (i = 0; i < nr_cpus; i++) {
cpumap_set(&cpumap_active_cpus, i);
+ }
}
const struct cpumap *
@@ -53,8 +54,9 @@ cpumap_create(struct cpumap **cpumapp)
cpumap = kmem_cache_alloc(&cpumap_cache);
- if (cpumap == NULL)
+ if (cpumap == NULL) {
return ERROR_NOMEM;
+ }
*cpumapp = cpumap;
return 0;
@@ -73,8 +75,9 @@ cpumap_check(const struct cpumap *cpumap)
index = bitmap_find_first(cpumap->cpus, cpu_count());
- if (index == -1)
+ if (index == -1) {
return ERROR_INVAL;
+ }
return 0;
}
diff --git a/kern/error.c b/kern/error.c
index a86e913c..043cdb98 100644
--- a/kern/error.c
+++ b/kern/error.c
@@ -42,8 +42,9 @@ error_str(int error)
void
error_check(int error, const char *prefix)
{
- if (!error)
+ if (!error) {
return;
+ }
panic("%s%s%s",
(prefix == NULL) ? "" : prefix,
diff --git a/kern/evcnt.c b/kern/evcnt.c
index a4eab794..803516d0 100644
--- a/kern/evcnt.c
+++ b/kern/evcnt.c
@@ -63,8 +63,9 @@ evcnt_info(const char *pattern)
length = strlen(evcnt->name);
if ((length < pattern_length)
- || (memcmp(evcnt->name, pattern, pattern_length) != 0))
+ || (memcmp(evcnt->name, pattern, pattern_length) != 0)) {
continue;
+ }
}
printk("evcnt: %-30s %17llu\n", evcnt->name, evcnt->count);
diff --git a/kern/kmem.c b/kern/kmem.c
index 3f1d0d1a..59bb99d4 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -165,8 +165,9 @@ kmem_buf_verify_bytes(void *buf, void *pattern, size_t size)
end = buf + size;
for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++)
- if (*ptr != *pattern_ptr)
+ if (*ptr != *pattern_ptr) {
return ptr;
+ }
return NULL;
}
@@ -181,8 +182,9 @@ kmem_buf_fill(void *buf, uint64_t pattern, size_t size)
end = buf + size;
- for (ptr = buf; ptr < end; ptr++)
+ for (ptr = buf; ptr < end; ptr++) {
*ptr = pattern;
+ }
}
static void *
@@ -196,8 +198,9 @@ kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new, size_t size)
end = buf + size;
for (ptr = buf; ptr < end; ptr++) {
- if (*ptr != old)
+ if (*ptr != old) {
return kmem_buf_verify_bytes(ptr, &old, sizeof(old));
+ }
*ptr = new;
}
@@ -261,8 +264,9 @@ kmem_slab_create(struct kmem_cache *cache, size_t color)
page = vm_page_alloc(cache->slab_order, VM_PAGE_SEL_DIRECTMAP,
VM_PAGE_KMEM);
- if (page == NULL)
+ if (page == NULL) {
return NULL;
+ }
slab_buf = vm_page_direct_ptr(page);
@@ -291,8 +295,9 @@ kmem_slab_create(struct kmem_cache *cache, size_t color)
bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size);
}
- if (cache->flags & KMEM_CF_VERIFY)
+ if (cache->flags & KMEM_CF_VERIFY) {
kmem_slab_create_verify(slab, cache);
+ }
return slab;
}
@@ -383,11 +388,13 @@ kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache)
for (i = 0; i < cpu_pool->transfer_size; i++) {
buf = kmem_cache_alloc_from_slab(cache);
- if (buf == NULL)
+ if (buf == NULL) {
break;
+ }
- if (ctor != NULL)
+ if (ctor != NULL) {
ctor(buf);
+ }
kmem_cpu_pool_push(cpu_pool, buf);
}
@@ -466,8 +473,9 @@ kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
buf_size = cache->buf_size;
- if (buf_size < KMEM_BUF_SIZE_THRESHOLD)
+ if (buf_size < KMEM_BUF_SIZE_THRESHOLD) {
flags |= KMEM_CACHE_NOOFFSLAB;
+ }
i = 0;
waste_min = (size_t)-1;
@@ -479,18 +487,20 @@ kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
slab_size = PAGE_SIZE << slab_order;
free_slab_size = slab_size;
- if (flags & KMEM_CACHE_NOOFFSLAB)
+ if (flags & KMEM_CACHE_NOOFFSLAB) {
free_slab_size -= sizeof(struct kmem_slab);
+ }
buffers = free_slab_size / buf_size;
waste = free_slab_size % buf_size;
- if (buffers > i)
+ if (buffers > i) {
i = buffers;
+ }
- if (flags & KMEM_CACHE_NOOFFSLAB)
+ if (flags & KMEM_CACHE_NOOFFSLAB) {
embed = 1;
- else if (sizeof(struct kmem_slab) <= waste) {
+ } else if (sizeof(struct kmem_slab) <= waste) {
embed = 1;
waste -= sizeof(struct kmem_slab);
} else {
@@ -515,12 +525,14 @@ kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
cache->bufs_per_slab = slab_size / buf_size;
cache->color_max = slab_size % buf_size;
- if (cache->color_max >= PAGE_SIZE)
+ if (cache->color_max >= PAGE_SIZE) {
cache->color_max = PAGE_SIZE - 1;
+ }
if (optimal_embed) {
- if (cache->slab_size == PAGE_SIZE)
+ if (cache->slab_size == PAGE_SIZE) {
cache->flags |= KMEM_CF_DIRECT;
+ }
} else {
cache->flags |= KMEM_CF_SLAB_EXTERNAL;
}
@@ -539,11 +551,13 @@ kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
cache->flags = 0;
#endif /* KMEM_CF_VERIFY */
- if (flags & KMEM_CACHE_VERIFY)
+ if (flags & KMEM_CACHE_VERIFY) {
cache->flags |= KMEM_CF_VERIFY;
+ }
- if (align < KMEM_ALIGN_MIN)
+ if (align < KMEM_ALIGN_MIN) {
align = KMEM_ALIGN_MIN;
+ }
assert(obj_size > 0);
assert(ISP2(align));
@@ -586,8 +600,9 @@ kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
cache->cpu_pool_type = cpu_pool_type;
- for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++)
+ for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) {
kmem_cpu_pool_init(&cache->cpu_pools[i], cache);
+ }
mutex_lock(&kmem_cache_list_lock);
list_insert_tail(&kmem_cache_list, &cache->node);
@@ -617,8 +632,9 @@ kmem_cache_grow(struct kmem_cache *cache)
color = cache->color;
cache->color += cache->align;
- if (cache->color > cache->color_max)
+ if (cache->color > cache->color_max) {
cache->color = 0;
+ }
mutex_unlock(&cache->lock);
@@ -632,8 +648,9 @@ kmem_cache_grow(struct kmem_cache *cache)
cache->nr_slabs++;
cache->nr_free_slabs++;
- if (kmem_slab_lookup_needed(cache->flags))
+ if (kmem_slab_lookup_needed(cache->flags)) {
kmem_slab_vmref(slab, cache->slab_size);
+ }
}
/*
@@ -658,12 +675,13 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache)
struct kmem_slab *slab;
union kmem_bufctl *bufctl;
- if (!list_empty(&cache->partial_slabs))
+ if (!list_empty(&cache->partial_slabs)) {
slab = list_first_entry(&cache->partial_slabs, struct kmem_slab, node);
- else if (!list_empty(&cache->free_slabs))
+ } else if (!list_empty(&cache->free_slabs)) {
slab = list_first_entry(&cache->free_slabs, struct kmem_slab, node);
- else
+ } else {
return NULL;
+ }
bufctl = slab->first_free;
assert(bufctl != NULL);
@@ -675,8 +693,9 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache)
/* The slab has become complete */
list_remove(&slab->node);
- if (slab->nr_refs == 1)
+ if (slab->nr_refs == 1) {
cache->nr_free_slabs--;
+ }
} else if (slab->nr_refs == 1) {
/*
* The slab has become partial. Insert the new slab at the end of
@@ -727,8 +746,9 @@ kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
if (slab->nr_refs == 0) {
/* The slab has become free */
- if (cache->bufs_per_slab != 1)
+ if (cache->bufs_per_slab != 1) {
list_remove(&slab->node);
+ }
list_insert_head(&cache->free_slabs, &slab->node);
cache->nr_free_slabs++;
@@ -747,14 +767,16 @@ kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, int construct)
buftag = kmem_buf_to_buftag(buf, cache);
- if (buftag->state != KMEM_BUFTAG_FREE)
+ if (buftag->state != KMEM_BUFTAG_FREE) {
kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
+ }
addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN,
cache->bufctl_dist);
- if (addr != NULL)
+ if (addr != NULL) {
kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
+ }
addr = buf + cache->obj_size;
memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad);
@@ -763,8 +785,9 @@ kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, int construct)
bufctl->redzone = KMEM_REDZONE_WORD;
buftag->state = KMEM_BUFTAG_ALLOC;
- if (construct && (cache->ctor != NULL))
+ if (construct && (cache->ctor != NULL)) {
cache->ctor(buf);
+ }
}
void *
@@ -786,8 +809,9 @@ fast_alloc:
mutex_unlock(&cpu_pool->lock);
thread_unpin();
- if (verify)
+ if (verify) {
kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT);
+ }
return buf;
}
@@ -801,8 +825,9 @@ fast_alloc:
filled = kmem_cache_grow(cache);
- if (!filled)
+ if (!filled) {
return NULL;
+ }
thread_pin();
cpu_pool = kmem_cpu_pool_get(cache);
@@ -823,17 +848,20 @@ slab_alloc:
if (buf == NULL) {
filled = kmem_cache_grow(cache);
- if (!filled)
+ if (!filled) {
return NULL;
+ }
goto slab_alloc;
}
- if (cache->flags & KMEM_CF_VERIFY)
+ if (cache->flags & KMEM_CF_VERIFY) {
kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT);
+ }
- if (cache->ctor != NULL)
+ if (cache->ctor != NULL) {
cache->ctor(buf);
+ }
return buf;
}
@@ -850,22 +878,26 @@ kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
page = vm_page_lookup(vm_page_direct_pa((unsigned long)buf));
- if (page == NULL)
+ if (page == NULL) {
kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+ }
slab = page->slab_priv;
- if (slab == NULL)
+ if (slab == NULL) {
kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+ }
slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE);
- if ((unsigned long)buf >= slabend)
+ if ((unsigned long)buf >= slabend) {
kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+ }
if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size)
- != 0)
+ != 0) {
kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+ }
/*
* As the buffer address is valid, accessing its buftag is safe.
@@ -873,18 +905,20 @@ kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
buftag = kmem_buf_to_buftag(buf, cache);
if (buftag->state != KMEM_BUFTAG_ALLOC) {
- if (buftag->state == KMEM_BUFTAG_FREE)
+ if (buftag->state == KMEM_BUFTAG_FREE) {
kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE, NULL);
- else
+ } else {
kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
+ }
}
redzone_byte = buf + cache->obj_size;
bufctl = kmem_buf_to_bufctl(buf, cache);
while (redzone_byte < (unsigned char *)bufctl) {
- if (*redzone_byte != KMEM_REDZONE_BYTE)
+ if (*redzone_byte != KMEM_REDZONE_BYTE) {
kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
+ }
redzone_byte++;
}
@@ -977,8 +1011,9 @@ kmem_cache_info(struct kmem_cache *cache)
if (cache == NULL) {
mutex_lock(&kmem_cache_list_lock);
- list_for_each_entry(&kmem_cache_list, cache, node)
+ list_for_each_entry(&kmem_cache_list, cache, node) {
kmem_cache_info(cache);
+ }
mutex_unlock(&kmem_cache_list_lock);
@@ -986,9 +1021,9 @@ kmem_cache_info(struct kmem_cache *cache)
}
snprintf(flags_str, sizeof(flags_str), "%s%s%s",
- (cache->flags & KMEM_CF_DIRECT) ? " DIRECT" : "",
- (cache->flags & KMEM_CF_SLAB_EXTERNAL) ? " SLAB_EXTERNAL" : "",
- (cache->flags & KMEM_CF_VERIFY) ? " VERIFY" : "");
+ (cache->flags & KMEM_CF_DIRECT) ? " DIRECT" : "",
+ (cache->flags & KMEM_CF_SLAB_EXTERNAL) ? " SLAB_EXTERNAL" : "",
+ (cache->flags & KMEM_CF_VERIFY) ? " VERIFY" : "");
mutex_lock(&cache->lock);
@@ -1079,8 +1114,9 @@ kmem_alloc(size_t size)
size_t index;
void *buf;
- if (size == 0)
+ if (size == 0) {
return NULL;
+ }
index = kmem_get_index(size);
@@ -1090,21 +1126,23 @@ kmem_alloc(size_t size)
cache = &kmem_caches[index];
buf = kmem_cache_alloc(cache);
- if ((buf != NULL) && (cache->flags & KMEM_CF_VERIFY))
+ if ((buf != NULL) && (cache->flags & KMEM_CF_VERIFY)) {
kmem_alloc_verify(cache, buf, size);
+ }
} else {
struct vm_page *page;
page = vm_page_alloc(vm_page_order(size), VM_PAGE_SEL_DIRECTMAP,
VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
return NULL;
+ }
buf = vm_page_direct_ptr(page);
}
- return buf;
+ return buf;
}
void *
@@ -1114,8 +1152,9 @@ kmem_zalloc(size_t size)
ptr = kmem_alloc(size);
- if (ptr == NULL)
+ if (ptr == NULL) {
return NULL;
+ }
memset(ptr, 0, size);
return ptr;
@@ -1132,8 +1171,9 @@ kmem_free_verify(struct kmem_cache *cache, void *buf, size_t size)
redzone_end = buf + cache->obj_size;
while (redzone_byte < redzone_end) {
- if (*redzone_byte != KMEM_REDZONE_BYTE)
+ if (*redzone_byte != KMEM_REDZONE_BYTE) {
kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
+ }
redzone_byte++;
}
@@ -1144,8 +1184,9 @@ kmem_free(void *ptr, size_t size)
{
size_t index;
- if ((ptr == NULL) || (size == 0))
+ if ((ptr == NULL) || (size == 0)) {
return;
+ }
index = kmem_get_index(size);
@@ -1154,8 +1195,9 @@ kmem_free(void *ptr, size_t size)
cache = &kmem_caches[index];
- if (cache->flags & KMEM_CF_VERIFY)
+ if (cache->flags & KMEM_CF_VERIFY) {
kmem_free_verify(cache, ptr, size);
+ }
kmem_cache_free(cache, ptr);
} else {
diff --git a/kern/list.h b/kern/list.h
index e64f5fd9..c85e8c15 100644
--- a/kern/list.h
+++ b/kern/list.h
@@ -186,8 +186,9 @@ list_concat(struct list *list1, const struct list *list2)
{
struct list *last1, *first2, *last2;
- if (list_empty(list2))
+ if (list_empty(list2)) {
return;
+ }
last1 = list1->prev;
first2 = list2->next;
diff --git a/kern/llsync.c b/kern/llsync.c
index 489e1539..db356ff1 100644
--- a/kern/llsync.c
+++ b/kern/llsync.c
@@ -126,8 +126,9 @@ llsync_process_global_checkpoint(void)
work_queue_transfer(&llsync_data.queue1, &llsync_data.queue0);
work_queue_init(&llsync_data.queue0);
- if (work_queue_nr_works(&queue) != 0)
+ if (work_queue_nr_works(&queue) != 0) {
work_queue_schedule(&queue, 0);
+ }
llsync_data.gcid.value++;
evcnt_inc(&llsync_data.ev_global_checkpoint);
@@ -136,8 +137,9 @@ llsync_process_global_checkpoint(void)
static void
llsync_flush_works(struct llsync_cpu_data *cpu_data)
{
- if (work_queue_nr_works(&cpu_data->queue0) == 0)
+ if (work_queue_nr_works(&cpu_data->queue0) == 0) {
return;
+ }
work_queue_concat(&llsync_data.queue0, &cpu_data->queue0);
work_queue_init(&cpu_data->queue0);
@@ -150,14 +152,16 @@ llsync_commit_checkpoint(unsigned int cpu)
pending = cpumap_test(&llsync_data.pending_checkpoints, cpu);
- if (!pending)
+ if (!pending) {
return;
+ }
cpumap_clear(&llsync_data.pending_checkpoints, cpu);
llsync_data.nr_pending_checkpoints--;
- if (llsync_data.nr_pending_checkpoints == 0)
+ if (llsync_data.nr_pending_checkpoints == 0) {
llsync_process_global_checkpoint();
+ }
}
void
@@ -184,8 +188,9 @@ llsync_register(void)
assert(!cpumap_test(&llsync_data.pending_checkpoints, cpu));
if ((llsync_data.nr_registered_cpus == 1)
- && (llsync_data.nr_pending_checkpoints == 0))
+ && (llsync_data.nr_pending_checkpoints == 0)) {
llsync_process_global_checkpoint();
+ }
spinlock_unlock_intr_restore(&llsync_data.lock, flags);
}
@@ -252,12 +257,12 @@ llsync_report_periodic_event(void)
* Check whether this periodic event occurred during a read-side critical
* section, and if not, trigger a checkpoint.
*/
- if (cpu_data->gcid == gcid)
+ if (cpu_data->gcid == gcid) {
llsync_commit_checkpoint(cpu_id());
- else {
- if (thread_llsync_in_read_cs())
+ } else {
+ if (thread_llsync_in_read_cs()) {
evcnt_inc(&llsync_data.ev_failed_periodic_checkin);
- else {
+ } else {
cpu_data->gcid = gcid;
evcnt_inc(&llsync_data.ev_periodic_checkin);
llsync_commit_checkpoint(cpu_id());
@@ -308,8 +313,9 @@ llsync_wait(void)
mutex_lock(&waiter.lock);
- while (!waiter.done)
+ while (!waiter.done) {
condition_wait(&waiter.cond, &waiter.lock);
+ }
mutex_unlock(&waiter.lock);
}
diff --git a/kern/llsync.h b/kern/llsync.h
index 0d7438bb..57ad5589 100644
--- a/kern/llsync.h
+++ b/kern/llsync.h
@@ -107,8 +107,9 @@ llsync_read_enter(void)
in_read_cs = thread_llsync_in_read_cs();
thread_llsync_read_inc();
- if (!in_read_cs)
+ if (!in_read_cs) {
thread_preempt_disable();
+ }
}
static inline void
@@ -116,8 +117,9 @@ llsync_read_exit(void)
{
thread_llsync_read_dec();
- if (!thread_llsync_in_read_cs())
+ if (!thread_llsync_in_read_cs()) {
thread_preempt_enable();
+ }
}
/*
diff --git a/kern/log2.h b/kern/log2.h
index c9cc5be1..0a3768a7 100644
--- a/kern/log2.h
+++ b/kern/log2.h
@@ -36,8 +36,9 @@ iorder2(unsigned long size)
{
assert(size != 0);
- if (size == 1)
+ if (size == 1) {
return 0;
+ }
return ilog2(size - 1) + 1;
}
diff --git a/kern/mutex.h b/kern/mutex.h
index d3fe74b5..a36ed17e 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -51,8 +51,9 @@ mutex_trylock(struct mutex *mutex)
state = mutex_tryacquire(mutex);
- if (state == MUTEX_UNLOCKED)
+ if (state == MUTEX_UNLOCKED) {
return 0;
+ }
return 1;
}
@@ -64,8 +65,9 @@ mutex_lock(struct mutex *mutex)
state = mutex_tryacquire(mutex);
- if (state == MUTEX_UNLOCKED)
+ if (state == MUTEX_UNLOCKED) {
return;
+ }
assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
@@ -79,8 +81,9 @@ mutex_unlock(struct mutex *mutex)
state = mutex_release(mutex);
- if (state == MUTEX_LOCKED)
+ if (state == MUTEX_LOCKED) {
return;
+ }
assert(state == MUTEX_CONTENDED);
diff --git a/kern/panic.c b/kern/panic.c
index 252c4b68..e0bf30cc 100644
--- a/kern/panic.c
+++ b/kern/panic.c
@@ -33,9 +33,11 @@ panic(const char *format, ...)
already_done = atomic_swap_uint(&panic_done, 1);
- if (already_done)
- for (;;)
+ if (already_done) {
+ for (;;) {
cpu_idle();
+ }
+ }
cpu_intr_disable();
cpu_halt_broadcast();
diff --git a/kern/percpu.c b/kern/percpu.c
index 71f67b1a..5b9690cc 100644
--- a/kern/percpu.c
+++ b/kern/percpu.c
@@ -52,14 +52,16 @@ percpu_setup(void)
percpu_area_size >> 10);
assert(vm_page_aligned(percpu_area_size));
- if (percpu_area_size == 0)
+ if (percpu_area_size == 0) {
return;
+ }
order = vm_page_order(percpu_area_size);
page = vm_page_alloc(order, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
panic("percpu: unable to allocate memory for percpu area content");
+ }
percpu_area_content = vm_page_direct_ptr(page);
memcpy(percpu_area_content, &_percpu, percpu_area_size);
@@ -86,8 +88,9 @@ percpu_add(unsigned int cpu)
return ERROR_INVAL;
}
- if (percpu_area_size == 0)
+ if (percpu_area_size == 0) {
goto out;
+ }
order = vm_page_order(percpu_area_size);
page = vm_page_alloc(order, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);
diff --git a/kern/printk.c b/kern/printk.c
index df3ba715..dd5cc87f 100644
--- a/kern/printk.c
+++ b/kern/printk.c
@@ -57,8 +57,9 @@ vprintk(const char *format, va_list ap)
length = vsnprintf(printk_buffer, sizeof(printk_buffer), format, ap);
- for (ptr = printk_buffer; *ptr != '\0'; ptr++)
+ for (ptr = printk_buffer; *ptr != '\0'; ptr++) {
console_write_byte(*ptr);
+ }
spinlock_unlock_intr_restore(&printk_lock, flags);
diff --git a/kern/rbtree.c b/kern/rbtree.c
index 569d3fac..49cb097f 100644
--- a/kern/rbtree.c
+++ b/kern/rbtree.c
@@ -34,8 +34,9 @@ rbtree_node_index(const struct rbtree_node *node,
assert(parent != NULL);
assert((node == NULL) || (rbtree_node_parent(node) == parent));
- if (parent->children[RBTREE_LEFT] == node)
+ if (parent->children[RBTREE_LEFT] == node) {
return RBTREE_LEFT;
+ }
assert(parent->children[RBTREE_RIGHT] == node);
@@ -126,8 +127,9 @@ rbtree_node_find_deepest(struct rbtree_node *node)
if (node == NULL) {
node = parent->children[RBTREE_RIGHT];
- if (node == NULL)
+ if (node == NULL) {
return parent;
+ }
}
}
}
@@ -151,16 +153,18 @@ rbtree_rotate(struct rbtree *tree, struct rbtree_node *node, int direction)
node->children[right] = rnode->children[left];
- if (rnode->children[left] != NULL)
+ if (rnode->children[left] != NULL) {
rbtree_node_set_parent(rnode->children[left], node);
+ }
rnode->children[left] = node;
rbtree_node_set_parent(rnode, parent);
- if (unlikely(parent == NULL))
+ if (unlikely(parent == NULL)) {
tree->root = rnode;
- else
+ } else {
parent->children[rbtree_node_index(node, parent)] = rnode;
+ }
rbtree_node_set_parent(node, rnode);
}
@@ -179,10 +183,11 @@ rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
node->children[RBTREE_LEFT] = NULL;
node->children[RBTREE_RIGHT] = NULL;
- if (unlikely(parent == NULL))
+ if (unlikely(parent == NULL)) {
tree->root = node;
- else
+ } else {
parent->children[index] = node;
+ }
for (;;) {
if (parent == NULL) {
@@ -190,8 +195,9 @@ rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
break;
}
- if (rbtree_node_is_black(parent))
+ if (rbtree_node_is_black(parent)) {
break;
+ }
grand_parent = rbtree_node_parent(parent);
assert(grand_parent != NULL);
@@ -242,11 +248,11 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
struct rbtree_node *child, *parent, *brother;
int color, left, right;
- if (node->children[RBTREE_LEFT] == NULL)
+ if (node->children[RBTREE_LEFT] == NULL) {
child = node->children[RBTREE_RIGHT];
- else if (node->children[RBTREE_RIGHT] == NULL)
+ } else if (node->children[RBTREE_RIGHT] == NULL) {
child = node->children[RBTREE_LEFT];
- else {
+ } else {
struct rbtree_node *successor;
/*
@@ -255,17 +261,19 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
successor = node->children[RBTREE_RIGHT];
- while (successor->children[RBTREE_LEFT] != NULL)
+ while (successor->children[RBTREE_LEFT] != NULL) {
successor = successor->children[RBTREE_LEFT];
+ }
color = rbtree_node_color(successor);
child = successor->children[RBTREE_RIGHT];
parent = rbtree_node_parent(node);
- if (unlikely(parent == NULL))
+ if (unlikely(parent == NULL)) {
tree->root = successor;
- else
+ } else {
parent->children[rbtree_node_index(node, parent)] = successor;
+ }
parent = rbtree_node_parent(successor);
@@ -276,16 +284,17 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
successor->children[RBTREE_LEFT] = node->children[RBTREE_LEFT];
rbtree_node_set_parent(successor->children[RBTREE_LEFT], successor);
- if (node == parent)
+ if (node == parent) {
parent = successor;
- else {
+ } else {
successor->children[RBTREE_RIGHT] = node->children[RBTREE_RIGHT];
rbtree_node_set_parent(successor->children[RBTREE_RIGHT],
successor);
parent->children[RBTREE_LEFT] = child;
- if (child != NULL)
+ if (child != NULL) {
rbtree_node_set_parent(child, parent);
+ }
}
goto update_color;
@@ -298,21 +307,24 @@ rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
color = rbtree_node_color(node);
parent = rbtree_node_parent(node);
- if (child != NULL)
+ if (child != NULL) {
rbtree_node_set_parent(child, parent);
+ }
- if (unlikely(parent == NULL))
+ if (unlikely(parent == NULL)) {
tree->root = child;
- else
+ } else {
parent->children[rbtree_node_index(node, parent)] = child;
+ }
/*
* The node has been removed, update the colors. The child pointer can
* be null, in which case it is considered a black leaf.
*/
update_color:
- if (color == RBTREE_COLOR_RED)
+ if (color == RBTREE_COLOR_RED) {
return;
+ }
for (;;) {
if ((child != NULL) && rbtree_node_is_red(child)) {
@@ -320,8 +332,9 @@ update_color:
break;
}
- if (parent == NULL)
+ if (parent == NULL) {
break;
+ }
left = rbtree_node_index(child, parent);
right = 1 - left;
@@ -383,13 +396,15 @@ rbtree_nearest(struct rbtree_node *parent, int index, int direction)
{
assert(rbtree_check_index(direction));
- if (parent == NULL)
+ if (parent == NULL) {
return NULL;
+ }
assert(rbtree_check_index(index));
- if (index != direction)
+ if (index != direction) {
return parent;
+ }
return rbtree_walk(parent, direction);
}
@@ -403,8 +418,9 @@ rbtree_firstlast(const struct rbtree *tree, int direction)
prev = NULL;
- for (cur = tree->root; cur != NULL; cur = cur->children[direction])
+ for (cur = tree->root; cur != NULL; cur = cur->children[direction]) {
prev = cur;
+ }
return prev;
}
@@ -419,14 +435,16 @@ rbtree_walk(struct rbtree_node *node, int direction)
left = direction;
right = 1 - left;
- if (node == NULL)
+ if (node == NULL) {
return NULL;
+ }
if (node->children[left] != NULL) {
node = node->children[left];
- while (node->children[right] != NULL)
+ while (node->children[right] != NULL) {
node = node->children[right];
+ }
} else {
struct rbtree_node *parent;
int index;
@@ -434,14 +452,16 @@ rbtree_walk(struct rbtree_node *node, int direction)
for (;;) {
parent = rbtree_node_parent(node);
- if (parent == NULL)
+ if (parent == NULL) {
return NULL;
+ }
index = rbtree_node_index(node, parent);
node = parent;
- if (index == right)
+ if (index == right) {
break;
+ }
}
}
@@ -455,8 +475,9 @@ rbtree_postwalk_deepest(const struct rbtree *tree)
node = tree->root;
- if (node == NULL)
+ if (node == NULL) {
return NULL;
+ }
return rbtree_node_find_deepest(node);
}
@@ -467,23 +488,26 @@ rbtree_postwalk_unlink(struct rbtree_node *node)
struct rbtree_node *parent;
int index;
- if (node == NULL)
+ if (node == NULL) {
return NULL;
+ }
assert(node->children[RBTREE_LEFT] == NULL);
assert(node->children[RBTREE_RIGHT] == NULL);
parent = rbtree_node_parent(node);
- if (parent == NULL)
+ if (parent == NULL) {
return NULL;
+ }
index = rbtree_node_index(node, parent);
parent->children[index] = NULL;
node = parent->children[RBTREE_RIGHT];
- if (node == NULL)
+ if (node == NULL) {
return parent;
+ }
return rbtree_node_find_deepest(node);
}
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 1cc9b08f..f63c4e0b 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -51,8 +51,9 @@ spinlock_trylock(struct spinlock *lock)
thread_preempt_disable();
busy = spinlock_tryacquire(lock);
- if (busy)
+ if (busy) {
thread_preempt_enable();
+ }
return busy;
}
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index c07f6615..ed851099 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -35,8 +35,9 @@ spinlock_tryacquire(struct spinlock *lock)
static inline void
spinlock_acquire(struct spinlock *lock)
{
- while (spinlock_tryacquire(lock))
+ while (spinlock_tryacquire(lock)) {
cpu_pause();
+ }
}
static inline void
diff --git a/kern/sprintf.c b/kern/sprintf.c
index 13f2671e..7117dbed 100644
--- a/kern/sprintf.c
+++ b/kern/sprintf.c
@@ -75,8 +75,9 @@ static const char sprintf_digits[] = "0123456789ABCDEF";
static inline char *
sprintf_putchar(char *str, char *end, char c)
{
- if (str < end)
+ if (str < end) {
*str = c;
+ }
str++;
@@ -131,12 +132,13 @@ vsnprintf(char *str, size_t size, const char *format, va_list ap)
start = str;
- if (size == 0)
+ if (size == 0) {
end = NULL;
- else if (size == SPRINTF_NOLIMIT)
+ } else if (size == SPRINTF_NOLIMIT) {
end = (char *)-1;
- else
+ } else {
end = start + size - 1;
+ }
while ((c = *format) != '\0') {
if (c != '%') {
@@ -217,8 +219,9 @@ vsnprintf(char *str, size_t size, const char *format, va_list ap)
} else if (c == '*') {
precision = va_arg(ap, int);
- if (precision < 0)
+ if (precision < 0) {
precision = 0;
+ }
format++;
c = *format;
@@ -309,51 +312,58 @@ integer:
case SPRINTF_SPECIFIER_INT:
switch (modifier) {
case SPRINTF_MODIFIER_CHAR:
- if (flags & SPRINTF_FORMAT_CONV_SIGNED)
+ if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
n = (signed char)va_arg(ap, int);
- else
+ } else {
n = (unsigned char)va_arg(ap, int);
+ }
break;
case SPRINTF_MODIFIER_SHORT:
- if (flags & SPRINTF_FORMAT_CONV_SIGNED)
+ if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
n = (short)va_arg(ap, int);
- else
+ } else {
n = (unsigned short)va_arg(ap, int);
+ }
break;
case SPRINTF_MODIFIER_LONG:
- if (flags & SPRINTF_FORMAT_CONV_SIGNED)
+ if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
n = va_arg(ap, long);
- else
+ } else {
n = va_arg(ap, unsigned long);
+ }
break;
case SPRINTF_MODIFIER_LONGLONG:
- if (flags & SPRINTF_FORMAT_CONV_SIGNED)
+ if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
n = va_arg(ap, long long);
- else
+ } else {
n = va_arg(ap, unsigned long long);
+ }
break;
case SPRINTF_MODIFIER_PTR:
n = (unsigned long)va_arg(ap, void *);
break;
case SPRINTF_MODIFIER_SIZE:
- if (flags & SPRINTF_FORMAT_CONV_SIGNED)
+ if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
n = va_arg(ap, ssize_t);
- else
+ } else {
n = va_arg(ap, size_t);
+ }
break;
case SPRINTF_MODIFIER_PTRDIFF:
n = va_arg(ap, ptrdiff_t);
break;
default:
- if (flags & SPRINTF_FORMAT_CONV_SIGNED)
+ if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
n = va_arg(ap, int);
- else
+ } else {
n = va_arg(ap, unsigned int);
+ }
break;
}
- if ((flags & SPRINTF_FORMAT_LEFT_JUSTIFY) || (precision >= 0))
+ if ((flags & SPRINTF_FORMAT_LEFT_JUSTIFY) || (precision >= 0)) {
flags &= ~SPRINTF_FORMAT_ZERO_PAD;
+ }
sign = 0;
@@ -362,8 +372,9 @@ integer:
width--;
/* '0x' or '0X' for hexadecimal */
- if (base == 16)
+ if (base == 16) {
width--;
+ }
} else if (flags & SPRINTF_FORMAT_CONV_SIGNED) {
if ((long long)n < 0) {
sign = '-';
@@ -384,8 +395,9 @@ integer:
i = 0;
if (n == 0) {
- if (precision != 0)
+ if (precision != 0) {
tmp[i++] = '0';
+ }
} else if (base == 10) {
/*
* Try to avoid 64 bits operations if the processor doesn't
@@ -429,15 +441,17 @@ integer:
} while (n != 0);
}
- if (i > precision)
+ if (i > precision) {
precision = i;
+ }
width -= precision;
if (!(flags & (SPRINTF_FORMAT_LEFT_JUSTIFY
| SPRINTF_FORMAT_ZERO_PAD)))
- while (width-- > 0)
+ while (width-- > 0) {
str = sprintf_putchar(str, end, ' ');
+ }
if (flags & SPRINTF_FORMAT_ALT_FORM) {
str = sprintf_putchar(str, end, '0');
@@ -452,56 +466,66 @@ integer:
if (!(flags & SPRINTF_FORMAT_LEFT_JUSTIFY)) {
c = (flags & SPRINTF_FORMAT_ZERO_PAD) ? '0' : ' ';
- while (width-- > 0)
+ while (width-- > 0) {
str = sprintf_putchar(str, end, c);
+ }
}
- while (i < precision--)
+ while (i < precision--) {
str = sprintf_putchar(str, end, '0');
+ }
- while (i-- > 0)
+ while (i-- > 0) {
str = sprintf_putchar(str, end, tmp[i]);
+ }
- while (width-- > 0)
+ while (width-- > 0) {
str = sprintf_putchar(str, end, ' ');
+ }
break;
case SPRINTF_SPECIFIER_CHAR:
c = (unsigned char)va_arg(ap, int);
if (!(flags & SPRINTF_FORMAT_LEFT_JUSTIFY))
- while (--width > 0)
+ while (--width > 0) {
str = sprintf_putchar(str, end, ' ');
+ }
str = sprintf_putchar(str, end, c);
- while (--width > 0)
+ while (--width > 0) {
str = sprintf_putchar(str, end, ' ');
+ }
break;
case SPRINTF_SPECIFIER_STR:
s = va_arg(ap, char *);
- if (s == NULL)
+ if (s == NULL) {
s = "(null)";
+ }
len = 0;
for (len = 0; s[len] != '\0'; len++)
- if (len == precision)
+ if (len == precision) {
break;
+ }
if (!(flags & SPRINTF_FORMAT_LEFT_JUSTIFY))
- while (len < width--)
+ while (len < width--) {
str = sprintf_putchar(str, end, ' ');
+ }
for (i = 0; i < len; i++) {
str = sprintf_putchar(str, end, *s);
s++;
}
- while (len < width--)
+ while (len < width--) {
str = sprintf_putchar(str, end, ' ');
+ }
break;
case SPRINTF_SPECIFIER_NRCHARS:
@@ -537,14 +561,16 @@ integer:
break;
}
- if (specifier != SPRINTF_SPECIFIER_INVALID)
+ if (specifier != SPRINTF_SPECIFIER_INVALID) {
format++;
+ }
}
- if (str < end)
+ if (str < end) {
*str = '\0';
- else if (end != NULL)
+ } else if (end != NULL) {
*end = '\0';
+ }
return str - start;
}
diff --git a/kern/sref.c b/kern/sref.c
index d139fcc5..528eb226 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -191,10 +191,11 @@ sref_queue_push(struct sref_queue *queue, struct sref_counter *counter)
{
counter->next = NULL;
- if (queue->last == NULL)
+ if (queue->last == NULL) {
queue->first = counter;
- else
+ } else {
queue->last->next = counter;
+ }
queue->last = counter;
queue->size++;
@@ -208,8 +209,9 @@ sref_queue_pop(struct sref_queue *queue)
counter = queue->first;
queue->first = counter->next;
- if (queue->last == counter)
+ if (queue->last == counter) {
queue->last = NULL;
+ }
queue->size--;
return counter;
@@ -224,8 +226,9 @@ sref_queue_transfer(struct sref_queue *dest, struct sref_queue *src)
static void
sref_queue_concat(struct sref_queue *queue1, struct sref_queue *queue2)
{
- if (sref_queue_empty(queue2))
+ if (sref_queue_empty(queue2)) {
return;
+ }
if (sref_queue_empty(queue1)) {
sref_queue_transfer(queue1, queue2);
@@ -311,10 +314,11 @@ sref_counter_add(struct sref_counter *counter, unsigned long delta)
counter->value += delta;
if (counter->value == 0) {
- if (sref_counter_is_queued(counter))
+ if (sref_counter_is_queued(counter)) {
sref_counter_mark_dirty(counter);
- else
+ } else {
sref_counter_schedule_review(counter);
+ }
}
spinlock_unlock(&counter->lock);
@@ -523,9 +527,9 @@ sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter)
delta = sref_cache_delta(cache, sref_counter_index(counter));
- if (!sref_delta_is_valid(delta))
+ if (!sref_delta_is_valid(delta)) {
sref_delta_set_counter(delta, counter);
- else if (sref_delta_counter(delta) != counter) {
+ } else if (sref_delta_counter(delta) != counter) {
sref_delta_flush(delta);
sref_delta_set_counter(delta, counter);
evcnt_inc(&cache->ev_collision);
@@ -545,8 +549,9 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue)
for (i = 0; i < ARRAY_SIZE(cache->deltas); i++) {
delta = sref_cache_delta(cache, i);
- if (sref_delta_is_valid(delta))
+ if (sref_delta_is_valid(delta)) {
sref_delta_evict(delta);
+ }
}
cpu = cpu_id();
@@ -556,16 +561,17 @@ sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue)
assert(sref_cache_is_registered(cache));
assert(cpumap_test(&sref_data.registered_cpus, cpu));
- if (!cpumap_test(&sref_data.pending_flushes, cpu))
+ if (!cpumap_test(&sref_data.pending_flushes, cpu)) {
sref_queue_init(queue);
- else {
+ } else {
cpumap_clear(&sref_data.pending_flushes, cpu);
sref_data.nr_pending_flushes--;
- if (sref_data.nr_pending_flushes != 0)
+ if (sref_data.nr_pending_flushes != 0) {
sref_queue_init(queue);
- else
+ } else {
sref_end_epoch(queue);
+ }
}
spinlock_unlock(&sref_data.lock);
@@ -604,8 +610,9 @@ sref_cache_manage(struct sref_cache *cache)
static int
sref_cache_check(struct sref_cache *cache)
{
- if (!sref_cache_is_dirty(cache))
+ if (!sref_cache_is_dirty(cache)) {
return 0;
+ }
sref_cache_wakeup_manager(cache);
return 1;
@@ -661,8 +668,9 @@ sref_review(struct sref_queue *queue)
}
}
- if (work_queue_nr_works(&works) != 0)
+ if (work_queue_nr_works(&works) != 0) {
work_queue_schedule(&works, 0);
+ }
if ((nr_dirty + nr_true) != 0) {
spinlock_lock(&sref_data.lock);
@@ -685,8 +693,9 @@ sref_manage(void *arg)
thread_preempt_disable();
cpu_intr_save(&flags);
- while (!sref_cache_is_dirty(cache))
+ while (!sref_cache_is_dirty(cache)) {
thread_sleep(NULL);
+ }
cpu_intr_restore(flags);
thread_preempt_enable();
@@ -722,8 +731,9 @@ sref_setup_manager(struct sref_cache *cache, unsigned int cpu)
error = cpumap_create(&cpumap);
- if (error)
+ if (error) {
panic("sref: unable to create manager thread CPU map");
+ }
cpumap_zero(cpumap);
cpumap_set(cpumap, cpu);
@@ -735,8 +745,9 @@ sref_setup_manager(struct sref_cache *cache, unsigned int cpu)
error = thread_create(&manager, &attr, sref_manage, cache);
cpumap_destroy(cpumap);
- if (error)
+ if (error) {
panic("sref: unable to create manager thread");
+ }
cache->manager = manager;
}
@@ -746,11 +757,13 @@ sref_setup(void)
{
unsigned int i;
- for (i = 1; i < cpu_count(); i++)
+ for (i = 1; i < cpu_count(); i++) {
sref_cache_init(percpu_ptr(sref_cache, i), i);
+ }
- for (i = 0; i < cpu_count(); i++)
+ for (i = 0; i < cpu_count(); i++) {
sref_setup_manager(percpu_ptr(sref_cache, i), i);
+ }
}
void
@@ -832,9 +845,9 @@ sref_unregister(void)
error = ERROR_BUSY;
}
- if (error)
+ if (error) {
sref_cache_mark_registered(cache);
- else {
+ } else {
cpumap_clear(&sref_data.registered_cpus, cpu);
sref_data.nr_registered_cpus--;
}
@@ -855,8 +868,9 @@ sref_report_periodic_event(void)
cache = sref_cache_get();
if (!sref_cache_is_registered(cache)
- || (cache->manager == thread_self()))
+ || (cache->manager == thread_self())) {
return;
+ }
sref_cache_manage(cache);
}
diff --git a/kern/string.c b/kern/string.c
index b671894f..88d251f8 100644
--- a/kern/string.c
+++ b/kern/string.c
@@ -33,8 +33,9 @@ memcpy(void *dest, const void *src, size_t n)
dest_ptr = dest;
src_ptr = src;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr++ = *src_ptr++;
+ }
return dest;
}
@@ -52,14 +53,16 @@ memmove(void *dest, const void *src, size_t n)
dest_ptr = dest;
src_ptr = src;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr++ = *src_ptr++;
+ }
} else {
dest_ptr = dest + n - 1;
src_ptr = src + n - 1;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr-- = *src_ptr--;
+ }
}
return dest;
@@ -75,8 +78,9 @@ memset(void *s, int c, size_t n)
buffer = s;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
buffer[i] = c;
+ }
return s;
}
@@ -93,8 +97,9 @@ memcmp(const void *s1, const void *s2, size_t n)
a2 = s2;
for (i = 0; i < n; i++)
- if (a1[i] != a2[i])
+ if (a1[i] != a2[i]) {
return (int)a1[i] - (int)a2[i];
+ }
return 0;
}
@@ -108,8 +113,9 @@ strlen(const char *s)
i = 0;
- while (*s++ != '\0')
+ while (*s++ != '\0') {
i++;
+ }
return i;
}
@@ -139,8 +145,9 @@ strlcpy(char *dest, const char *src, size_t n)
len = strlen(src);
- if (n == 0)
+ if (n == 0) {
goto out;
+ }
n = (len < n) ? len : n - 1;
memcpy(dest, src, n);
@@ -157,8 +164,9 @@ strcmp(const char *s1, const char *s2)
char c1, c2;
while ((c1 = *s1) == (c2 = *s2)) {
- if (c1 == '\0')
+ if (c1 == '\0') {
return 0;
+ }
s1++;
s2++;
diff --git a/kern/task.c b/kern/task.c
index ede16831..47efeecd 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -80,8 +80,9 @@ task_create(struct task **taskp, const char *name)
error = vm_map_create(&map);
- if (error)
+ if (error) {
goto error_map;
+ }
task_init(task, name, map);
@@ -122,8 +123,9 @@ task_info(struct task *task)
if (task == NULL) {
spinlock_lock(&task_list_lock);
- list_for_each_entry(&task_list, task, node)
+ list_for_each_entry(&task_list, task, node) {
printk("task: %s\n", task->name);
+ }
spinlock_unlock(&task_list_lock);
@@ -134,10 +136,11 @@ task_info(struct task *task)
printk("task: name: %s, threads:\n", task->name);
- list_for_each_entry(&task->threads, thread, task_node)
+ list_for_each_entry(&task->threads, thread, task_node) {
printk("task: %s: %p %c %.2s:%02u %s\n", task->name, thread,
thread_state_to_chr(thread), thread_schedclass_to_str(thread),
thread_schedprio(thread), thread->name);
+ }
spinlock_unlock(&task->lock);
}
diff --git a/kern/thread.c b/kern/thread.c
index 9efcd11a..97fb7adb 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -311,8 +311,9 @@ thread_runq_init_rt(struct thread_runq *runq)
rt_runq = &runq->rt_runq;
rt_runq->bitmap = 0;
- for (i = 0; i < ARRAY_SIZE(rt_runq->threads); i++)
+ for (i = 0; i < ARRAY_SIZE(rt_runq->threads); i++) {
list_init(&rt_runq->threads[i]);
+ }
}
static void __init
@@ -328,8 +329,9 @@ thread_ts_runq_init(struct thread_ts_runq *ts_runq)
{
size_t i;
- for (i = 0; i < ARRAY_SIZE(ts_runq->group_array); i++)
+ for (i = 0; i < ARRAY_SIZE(ts_runq->group_array); i++) {
thread_ts_group_init(&ts_runq->group_array[i]);
+ }
list_init(&ts_runq->groups);
list_init(&ts_runq->threads);
@@ -390,13 +392,15 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
thread_sched_ops[thread->sched_class].add(runq, thread);
- if (runq->nr_threads == 0)
+ if (runq->nr_threads == 0) {
cpumap_clear_atomic(&thread_idle_runqs, thread_runq_cpu(runq));
+ }
runq->nr_threads++;
- if (thread->sched_class < runq->current->sched_class)
+ if (thread->sched_class < runq->current->sched_class) {
thread_set_flag(runq->current, THREAD_YIELD);
+ }
thread->runq = runq;
}
@@ -409,8 +413,9 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
runq->nr_threads--;
- if (runq->nr_threads == 0)
+ if (runq->nr_threads == 0) {
cpumap_set_atomic(&thread_idle_runqs, thread_runq_cpu(runq));
+ }
thread_sched_ops[thread->sched_class].remove(runq, thread);
}
@@ -470,8 +475,9 @@ thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
static void
thread_runq_wakeup_balancer(struct thread_runq *runq)
{
- if (runq->balancer->state == THREAD_RUNNING)
+ if (runq->balancer->state == THREAD_RUNNING) {
return;
+ }
runq->balancer->state = THREAD_RUNNING;
thread_runq_wakeup(runq, runq->balancer);
@@ -494,8 +500,9 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev)
if (prev->state != THREAD_RUNNING) {
thread_runq_remove(runq, prev);
- if ((runq->nr_threads == 0) && (prev != runq->balancer))
+ if ((runq->nr_threads == 0) && (prev != runq->balancer)) {
thread_runq_wakeup_balancer(runq);
+ }
}
next = thread_runq_get_next(runq);
@@ -578,12 +585,14 @@ thread_sched_rt_add(struct thread_runq *runq, struct thread *thread)
threads = &rt_runq->threads[thread->rt_data.priority];
list_insert_tail(threads, &thread->rt_data.node);
- if (list_singular(threads))
+ if (list_singular(threads)) {
rt_runq->bitmap |= (1ULL << thread->rt_data.priority);
+ }
if ((thread->sched_class == runq->current->sched_class)
- && (thread->rt_data.priority > runq->current->rt_data.priority))
+ && (thread->rt_data.priority > runq->current->rt_data.priority)) {
thread_set_flag(runq->current, THREAD_YIELD);
+ }
}
static void
@@ -596,8 +605,9 @@ thread_sched_rt_remove(struct thread_runq *runq, struct thread *thread)
threads = &rt_runq->threads[thread->rt_data.priority];
list_remove(&thread->rt_data.node);
- if (list_empty(threads))
+ if (list_empty(threads)) {
rt_runq->bitmap &= ~(1ULL << thread->rt_data.priority);
+ }
}
static void
@@ -616,8 +626,9 @@ thread_sched_rt_get_next(struct thread_runq *runq)
rt_runq = &runq->rt_runq;
- if (rt_runq->bitmap == 0)
+ if (rt_runq->bitmap == 0) {
return NULL;
+ }
priority = THREAD_SCHED_RT_PRIO_MAX - __builtin_clzll(rt_runq->bitmap);
threads = &rt_runq->threads[priority];
@@ -632,13 +643,15 @@ thread_sched_rt_tick(struct thread_runq *runq, struct thread *thread)
{
(void)runq;
- if (thread->sched_policy != THREAD_SCHED_POLICY_RR)
+ if (thread->sched_policy != THREAD_SCHED_POLICY_RR) {
return;
+ }
thread->rt_data.time_slice--;
- if (thread->rt_data.time_slice > 0)
+ if (thread->rt_data.time_slice > 0) {
return;
+ }
thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE;
thread_set_flag(thread, THREAD_YIELD);
@@ -669,16 +682,18 @@ thread_sched_ts_select_runq(struct thread *thread)
int i;
cpumap_for_each(&thread_idle_runqs, i) {
- if (!cpumap_test(&thread->cpumap, i))
+ if (!cpumap_test(&thread->cpumap, i)) {
continue;
+ }
runq = percpu_ptr(thread_runq, i);
spinlock_lock(&runq->lock);
/* The run queue really is idle, return it */
- if (runq->current == runq->idler)
+ if (runq->current == runq->idler) {
goto out;
+ }
spinlock_unlock(&runq->lock);
}
@@ -686,8 +701,9 @@ thread_sched_ts_select_runq(struct thread *thread)
runq = NULL;
cpumap_for_each(&thread_active_runqs, i) {
- if (!cpumap_test(&thread->cpumap, i))
+ if (!cpumap_test(&thread->cpumap, i)) {
continue;
+ }
tmp = percpu_ptr(thread_runq, i);
@@ -740,8 +756,9 @@ thread_sched_ts_enqueue_scale(unsigned int work, unsigned int old_weight,
assert(old_weight != 0);
#ifndef __LP64__
- if (likely((work < 0x10000) && (new_weight < 0x10000)))
+ if (likely((work < 0x10000) && (new_weight < 0x10000))) {
return (work * new_weight) / old_weight;
+ }
#endif /* __LP64__ */
return (unsigned int)(((unsigned long long)work * new_weight) / old_weight);
@@ -768,15 +785,16 @@ thread_sched_ts_enqueue(struct thread_ts_runq *ts_runq, unsigned long round,
while (!list_end(&ts_runq->groups, node)) {
tmp = list_entry(node, struct thread_ts_group, node);
- if (tmp->weight >= group_weight)
+ if (tmp->weight >= group_weight) {
break;
+ }
node = list_prev(node);
}
- if (group->weight == 0)
+ if (group->weight == 0) {
list_insert_after(node, &group->node);
- else if (node != init_node) {
+ } else if (node != init_node) {
list_remove(&group->node);
list_insert_after(node, &group->node);
}
@@ -792,9 +810,9 @@ thread_sched_ts_enqueue(struct thread_ts_runq *ts_runq, unsigned long round,
} else {
unsigned int group_work, thread_work;
- if (ts_runq->weight == 0)
+ if (ts_runq->weight == 0) {
thread_work = 0;
- else {
+ } else {
group_work = (group->weight == 0)
? thread_sched_ts_enqueue_scale(ts_runq->work,
ts_runq->weight,
@@ -832,8 +850,9 @@ thread_sched_ts_restart(struct thread_runq *runq)
assert(node != NULL);
ts_runq->current = list_entry(node, struct thread_ts_group, node);
- if (runq->current->sched_class == THREAD_SCHED_CLASS_TS)
+ if (runq->current->sched_class == THREAD_SCHED_CLASS_TS) {
thread_set_flag(runq->current, THREAD_YIELD);
+ }
}
static void
@@ -841,14 +860,16 @@ thread_sched_ts_add(struct thread_runq *runq, struct thread *thread)
{
unsigned int total_weight;
- if (runq->ts_weight == 0)
+ if (runq->ts_weight == 0) {
runq->ts_round = thread_ts_highest_round;
+ }
total_weight = runq->ts_weight + thread->ts_data.weight;
/* TODO Limit the maximum number of threads to prevent this situation */
- if (total_weight < runq->ts_weight)
+ if (total_weight < runq->ts_weight) {
panic("thread: weight overflow");
+ }
runq->ts_weight = total_weight;
thread_sched_ts_enqueue(runq->ts_runq_active, runq->ts_round, thread);
@@ -876,17 +897,18 @@ thread_sched_ts_dequeue(struct thread *thread)
group->weight -= thread->ts_data.weight;
ts_runq->nr_threads--;
- if (group->weight == 0)
+ if (group->weight == 0) {
list_remove(&group->node);
- else {
+ } else {
node = list_next(&group->node);
init_node = node;
while (!list_end(&ts_runq->groups, node)) {
tmp = list_entry(node, struct thread_ts_group, node);
- if (tmp->weight <= group->weight)
+ if (tmp->weight <= group->weight) {
break;
+ }
node = list_next(node);
}
@@ -908,10 +930,11 @@ thread_sched_ts_remove(struct thread_runq *runq, struct thread *thread)
thread_sched_ts_dequeue(thread);
if (ts_runq == runq->ts_runq_active) {
- if (ts_runq->nr_threads == 0)
+ if (ts_runq->nr_threads == 0) {
thread_runq_wakeup_balancer(runq);
- else
+ } else {
thread_sched_ts_restart(runq);
+ }
}
}
@@ -926,8 +949,9 @@ thread_sched_ts_deactivate(struct thread_runq *runq, struct thread *thread)
thread->ts_data.work -= thread->ts_data.weight;
thread_sched_ts_enqueue(runq->ts_runq_expired, runq->ts_round + 1, thread);
- if (runq->ts_runq_active->nr_threads == 0)
+ if (runq->ts_runq_active->nr_threads == 0) {
thread_runq_wakeup_balancer(runq);
+ }
}
static void
@@ -940,8 +964,9 @@ thread_sched_ts_put_prev(struct thread_runq *runq, struct thread *thread)
group = &ts_runq->group_array[thread->ts_data.priority];
list_insert_tail(&group->threads, &thread->ts_data.group_node);
- if (thread->ts_data.work >= thread->ts_data.weight)
+ if (thread->ts_data.work >= thread->ts_data.weight) {
thread_sched_ts_deactivate(runq, thread);
+ }
}
static int
@@ -975,8 +1000,9 @@ thread_sched_ts_get_next(struct thread_runq *runq)
ts_runq = runq->ts_runq_active;
- if (ts_runq->nr_threads == 0)
+ if (ts_runq->nr_threads == 0) {
return NULL;
+ }
group = ts_runq->current;
node = list_next(&group->node);
@@ -987,9 +1013,9 @@ thread_sched_ts_get_next(struct thread_runq *runq)
} else {
next = list_entry(node, struct thread_ts_group, node);
- if (thread_sched_ts_ratio_exceeded(group, next))
+ if (thread_sched_ts_ratio_exceeded(group, next)) {
group = next;
- else {
+ } else {
node = list_first(&ts_runq->groups);
group = list_entry(node, struct thread_ts_group, node);
}
@@ -1030,8 +1056,9 @@ thread_sched_ts_start_next_round(struct thread_runq *runq)
runq->ts_round++;
delta = (long)(runq->ts_round - thread_ts_highest_round);
- if (delta > 0)
+ if (delta > 0) {
thread_ts_highest_round = runq->ts_round;
+ }
thread_sched_ts_restart(runq);
}
@@ -1046,20 +1073,23 @@ thread_sched_ts_balance_eligible(struct thread_runq *runq,
{
unsigned int nr_threads;
- if (runq->ts_weight == 0)
+ if (runq->ts_weight == 0) {
return 0;
+ }
if ((runq->ts_round != highest_round)
- && (runq->ts_round != (highest_round - 1)))
+ && (runq->ts_round != (highest_round - 1))) {
return 0;
+ }
nr_threads = runq->ts_runq_active->nr_threads
+ runq->ts_runq_expired->nr_threads;
if ((nr_threads == 0)
|| ((nr_threads == 1)
- && (runq->current->sched_class == THREAD_SCHED_CLASS_TS)))
+ && (runq->current->sched_class == THREAD_SCHED_CLASS_TS))) {
return 0;
+ }
return 1;
}
@@ -1083,8 +1113,9 @@ thread_sched_ts_balance_scan(struct thread_runq *runq,
cpumap_for_each(&thread_active_runqs, i) {
tmp = percpu_ptr(thread_runq, i);
- if (tmp == runq)
+ if (tmp == runq) {
continue;
+ }
spinlock_lock(&tmp->lock);
@@ -1107,8 +1138,9 @@ thread_sched_ts_balance_scan(struct thread_runq *runq,
spinlock_unlock(&tmp->lock);
}
- if (remote_runq != NULL)
+ if (remote_runq != NULL) {
spinlock_unlock(&remote_runq->lock);
+ }
cpu_intr_restore(flags);
thread_preempt_enable();
@@ -1129,8 +1161,9 @@ thread_sched_ts_balance_pull(struct thread_runq *runq,
list_for_each_entry_safe(&ts_runq->threads, thread, tmp,
ts_data.runq_node) {
- if (thread == remote_runq->current)
+ if (thread == remote_runq->current) {
continue;
+ }
/*
* The pinned counter is changed without explicit synchronization.
@@ -1141,11 +1174,13 @@ thread_sched_ts_balance_pull(struct thread_runq *runq,
* changing the pinned counter and setting the current thread of a
* run queue.
*/
- if (thread->pinned)
+ if (thread->pinned) {
continue;
+ }
- if (!cpumap_test(&thread->cpumap, cpu))
+ if (!cpumap_test(&thread->cpumap, cpu)) {
continue;
+ }
/*
* Make sure at least one thread is pulled if possible. If one or more
@@ -1153,8 +1188,9 @@ thread_sched_ts_balance_pull(struct thread_runq *runq,
*/
if ((nr_pulls != 0)
&& ((runq->ts_weight + thread->ts_data.weight)
- > (remote_runq->ts_weight - thread->ts_data.weight)))
+ > (remote_runq->ts_weight - thread->ts_data.weight))) {
break;
+ }
thread_runq_remove(remote_runq, thread);
@@ -1164,8 +1200,9 @@ thread_sched_ts_balance_pull(struct thread_runq *runq,
thread_runq_add(runq, thread);
nr_pulls++;
- if (nr_pulls == THREAD_MAX_MIGRATIONS)
+ if (nr_pulls == THREAD_MAX_MIGRATIONS) {
break;
+ }
}
return nr_pulls;
@@ -1180,14 +1217,16 @@ thread_sched_ts_balance_migrate(struct thread_runq *runq,
nr_pulls = 0;
- if (!thread_sched_ts_balance_eligible(remote_runq, highest_round))
+ if (!thread_sched_ts_balance_eligible(remote_runq, highest_round)) {
goto out;
+ }
nr_pulls = thread_sched_ts_balance_pull(runq, remote_runq,
remote_runq->ts_runq_active, 0);
- if (nr_pulls == THREAD_MAX_MIGRATIONS)
+ if (nr_pulls == THREAD_MAX_MIGRATIONS) {
goto out;
+ }
/*
* Threads in the expired queue of a processor in round highest are
@@ -1224,8 +1263,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
highest_round = thread_ts_highest_round;
if ((runq->ts_round != highest_round)
- && (runq->ts_runq_expired->nr_threads != 0))
+ && (runq->ts_runq_expired->nr_threads != 0)) {
goto no_migration;
+ }
spinlock_unlock_intr_restore(&runq->lock, *flags);
thread_preempt_enable();
@@ -1240,8 +1280,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
highest_round);
spinlock_unlock(&remote_runq->lock);
- if (nr_migrations != 0)
+ if (nr_migrations != 0) {
return;
+ }
spinlock_unlock_intr_restore(&runq->lock, *flags);
thread_preempt_enable();
@@ -1256,8 +1297,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
cpumap_for_each(&thread_active_runqs, i) {
remote_runq = percpu_ptr(thread_runq, i);
- if (remote_runq == runq)
+ if (remote_runq == runq) {
continue;
+ }
thread_preempt_disable();
cpu_intr_save(flags);
@@ -1266,8 +1308,9 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
highest_round);
spinlock_unlock(&remote_runq->lock);
- if (nr_migrations != 0)
+ if (nr_migrations != 0) {
return;
+ }
spinlock_unlock_intr_restore(&runq->lock, *flags);
thread_preempt_enable();
@@ -1285,8 +1328,9 @@ no_migration:
* queue lock must remain held until the next scheduling decision to
* prevent a remote balancer thread from stealing active threads.
*/
- if (runq->ts_runq_active->nr_threads == 0)
+ if (runq->ts_runq_active->nr_threads == 0) {
thread_sched_ts_start_next_round(runq);
+ }
}
static void
@@ -1456,8 +1500,9 @@ thread_destroy_tsd(struct thread *thread)
thread->tsd[i] = NULL;
thread_dtors[i](ptr);
- if (thread->tsd[i] == NULL)
+ if (thread->tsd[i] == NULL) {
i++;
+ }
}
}
@@ -1512,13 +1557,15 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr,
thread->fn = fn;
thread->arg = arg;
- if (attr->flags & THREAD_ATTR_DETACHED)
+ if (attr->flags & THREAD_ATTR_DETACHED) {
thread->flags |= THREAD_DETACHED;
+ }
error = tcb_init(&thread->tcb, stack, thread_main);
- if (error)
+ if (error) {
goto error_tsd;
+ }
task_add_thread(task, thread);
@@ -1541,8 +1588,9 @@ thread_lock_runq(struct thread *thread, unsigned long *flags)
spinlock_lock_intr_save(&runq->lock, flags);
- if (runq == thread->runq)
+ if (runq == thread->runq) {
return runq;
+ }
spinlock_unlock_intr_restore(&runq->lock, *flags);
}
@@ -1579,8 +1627,9 @@ thread_join_common(struct thread *thread)
mutex_lock(&thread->join_lock);
- while (!thread->exited)
+ while (!thread->exited) {
condition_wait(&thread->join_cond, &thread->join_lock);
+ }
mutex_unlock(&thread->join_lock);
@@ -1598,8 +1647,9 @@ thread_reap(void *arg)
for (;;) {
mutex_lock(&thread_reap_lock);
- while (list_empty(&thread_reap_list))
+ while (list_empty(&thread_reap_list)) {
condition_wait(&thread_reap_cond, &thread_reap_lock);
+ }
list_set_head(&zombies, &thread_reap_list);
list_init(&thread_reap_list);
@@ -1630,8 +1680,9 @@ thread_setup_reaper(void)
thread_attr_init(&attr, "x15_thread_reap");
error = thread_create(&thread, &attr, thread_reap, NULL);
- if (error)
+ if (error) {
panic("thread: unable to create reaper thread");
+ }
}
static void
@@ -1643,13 +1694,15 @@ thread_balance_idle_tick(struct thread_runq *runq)
* Interrupts can occur early, at a time the balancer thread hasn't been
* created yet.
*/
- if (runq->balancer == NULL)
+ if (runq->balancer == NULL) {
return;
+ }
runq->idle_balance_ticks--;
- if (runq->idle_balance_ticks == 0)
+ if (runq->idle_balance_ticks == 0) {
thread_runq_wakeup_balancer(runq);
+ }
}
static void
@@ -1692,8 +1745,9 @@ thread_setup_balancer(struct thread_runq *runq)
error = cpumap_create(&cpumap);
- if (error)
+ if (error) {
panic("thread: unable to create balancer thread CPU map");
+ }
cpumap_zero(cpumap);
cpumap_set(cpumap, thread_runq_cpu(runq));
@@ -1706,8 +1760,9 @@ thread_setup_balancer(struct thread_runq *runq)
error = thread_create(&balancer, &attr, thread_balance, runq);
cpumap_destroy(cpumap);
- if (error)
+ if (error) {
panic("thread: unable to create balancer thread");
+ }
runq->balancer = balancer;
}
@@ -1764,20 +1819,23 @@ thread_setup_idler(struct thread_runq *runq)
error = cpumap_create(&cpumap);
- if (error)
+ if (error) {
panic("thread: unable to allocate idler thread CPU map");
+ }
cpumap_zero(cpumap);
cpumap_set(cpumap, thread_runq_cpu(runq));
idler = kmem_cache_alloc(&thread_cache);
- if (idler == NULL)
+ if (idler == NULL) {
panic("thread: unable to allocate idler thread");
+ }
stack = kmem_cache_alloc(&thread_stack_cache);
- if (stack == NULL)
+ if (stack == NULL) {
panic("thread: unable to allocate idler thread stack");
+ }
snprintf(name, sizeof(name), "x15_thread_idle/%u", thread_runq_cpu(runq));
thread_attr_init(&attr, name);
@@ -1785,8 +1843,9 @@ thread_setup_idler(struct thread_runq *runq)
thread_attr_set_policy(&attr, THREAD_SCHED_POLICY_IDLE);
error = thread_init(idler, stack, &attr, thread_idle, NULL);
- if (error)
+ if (error) {
panic("thread: unable to initialize idler thread");
+ }
cpumap_destroy(cpumap);
@@ -1808,8 +1867,9 @@ thread_setup(void)
{
int cpu;
- for (cpu = 1; (unsigned int)cpu < cpu_count(); cpu++)
+ for (cpu = 1; (unsigned int)cpu < cpu_count(); cpu++) {
thread_bootstrap_common(cpu);
+ }
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
CPU_L1_SIZE, NULL, 0);
@@ -1818,8 +1878,9 @@ thread_setup(void)
thread_setup_reaper();
- cpumap_for_each(&thread_active_runqs, cpu)
+ cpumap_for_each(&thread_active_runqs, cpu) {
thread_setup_runq(percpu_ptr(thread_runq, cpu));
+ }
}
int
@@ -1834,8 +1895,9 @@ thread_create(struct thread **threadp, const struct thread_attr *attr,
if (attr->cpumap != NULL) {
error = cpumap_check(attr->cpumap);
- if (error)
+ if (error) {
return error;
+ }
}
thread = kmem_cache_alloc(&thread_cache);
@@ -1854,8 +1916,9 @@ thread_create(struct thread **threadp, const struct thread_attr *attr,
error = thread_init(thread, stack, attr, fn, arg);
- if (error)
+ if (error) {
goto error_init;
+ }
/*
* The new thread address must be written before the thread is started
@@ -1994,9 +2057,9 @@ thread_wakeup(struct thread *thread)
thread_preempt_disable();
cpu_intr_save(&flags);
- if (!thread->pinned)
+ if (!thread->pinned) {
runq = thread_sched_ops[thread->sched_class].select_runq(thread);
- else {
+ } else {
runq = thread->runq;
spinlock_lock(&runq->lock);
}
@@ -2039,8 +2102,9 @@ thread_yield(void)
thread = thread_self();
- if (!thread_preempt_enabled())
+ if (!thread_preempt_enabled()) {
return;
+ }
do {
thread_preempt_disable();
@@ -2082,8 +2146,9 @@ thread_tick_intr(void)
spinlock_lock(&runq->lock);
- if (runq->nr_threads == 0)
+ if (runq->nr_threads == 0) {
thread_balance_idle_tick(runq);
+ }
thread_sched_ops[thread->sched_class].tick(runq, thread);
@@ -2142,8 +2207,9 @@ thread_key_create(unsigned int *keyp, thread_dtor_fn_t dtor)
key = atomic_fetchadd_uint(&thread_nr_keys, 1);
- if (key >= THREAD_KEYS_MAX)
+ if (key >= THREAD_KEYS_MAX) {
panic("thread: maximum number of keys exceeded");
+ }
thread_dtors[key] = dtor;
*keyp = key;
diff --git a/kern/thread.h b/kern/thread.h
index 36aea2ff..2c483ab4 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -409,8 +409,9 @@ thread_schedule(void)
{
barrier();
- if (likely(!thread_test_flag(thread_self(), THREAD_YIELD)))
+ if (likely(!thread_test_flag(thread_self(), THREAD_YIELD))) {
return;
+ }
thread_yield();
}
diff --git a/kern/work.c b/kern/work.c
index f89dd389..f3d0848a 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -192,8 +192,9 @@ work_pool_init(struct work_pool *pool, unsigned int cpu, int flags)
id = work_pool_alloc_id(pool);
error = work_thread_create(pool, id);
- if (error)
+ if (error) {
goto error_thread;
+ }
return;
@@ -212,9 +213,9 @@ work_pool_cpu_select(int flags)
static void
work_pool_acquire(struct work_pool *pool, unsigned long *flags)
{
- if (pool->flags & WORK_PF_GLOBAL)
+ if (pool->flags & WORK_PF_GLOBAL) {
spinlock_lock_intr_save(&pool->lock, flags);
- else {
+ } else {
thread_preempt_disable();
cpu_intr_save(flags);
}
@@ -223,9 +224,9 @@ work_pool_acquire(struct work_pool *pool, unsigned long *flags)
static void
work_pool_release(struct work_pool *pool, unsigned long flags)
{
- if (pool->flags & WORK_PF_GLOBAL)
+ if (pool->flags & WORK_PF_GLOBAL) {
spinlock_unlock_intr_restore(&pool->lock, flags);
- else {
+ } else {
cpu_intr_restore(flags);
thread_preempt_enable();
}
@@ -242,8 +243,9 @@ static struct work *
work_pool_pop_work(struct work_pool *pool)
{
if (!(pool->flags & WORK_PF_GLOBAL)) {
- if (work_queue_nr_works(&pool->queue1) != 0)
+ if (work_queue_nr_works(&pool->queue1) != 0) {
return work_queue_pop(&pool->queue1);
+ }
}
return work_queue_pop(&pool->queue0);
@@ -252,11 +254,13 @@ work_pool_pop_work(struct work_pool *pool)
static void
work_pool_wakeup_manager(struct work_pool *pool)
{
- if (work_pool_nr_works(pool) == 0)
+ if (work_pool_nr_works(pool) == 0) {
return;
+ }
- if ((pool->manager != NULL) && (pool->manager->thread != thread_self()))
+ if ((pool->manager != NULL) && (pool->manager->thread != thread_self())) {
thread_wakeup(pool->manager->thread);
+ }
}
static void
@@ -268,8 +272,9 @@ work_pool_shift_queues(struct work_pool *pool, struct work_queue *old_queue)
work_queue_transfer(&pool->queue1, &pool->queue0);
work_queue_init(&pool->queue0);
- if (work_queue_nr_works(old_queue) != 0)
+ if (work_queue_nr_works(old_queue) != 0) {
evcnt_inc(&pool->ev_transfer);
+ }
}
static void
@@ -308,9 +313,9 @@ work_process(void *arg)
list_insert_tail(&pool->available_threads, &self->node);
pool->nr_available_threads++;
- do
+ do {
thread_sleep(lock);
- while (pool->manager != NULL);
+ } while (pool->manager != NULL);
list_remove(&self->node);
pool->nr_available_threads--;
@@ -338,14 +343,15 @@ work_process(void *arg)
}
if (work_pool_nr_works(pool) == 0) {
- if (pool->nr_threads > WORK_THREADS_SPARE)
+ if (pool->nr_threads > WORK_THREADS_SPARE) {
break;
+ }
pool->manager = self;
- do
+ do {
thread_sleep(lock);
- while (work_pool_nr_works(pool) == 0);
+ } while (work_pool_nr_works(pool) == 0);
pool->manager = NULL;
}
@@ -396,8 +402,9 @@ work_thread_create(struct work_pool *pool, unsigned int id)
worker = kmem_cache_alloc(&work_thread_cache);
- if (worker == NULL)
+ if (worker == NULL) {
return ERROR_NOMEM;
+ }
worker->pool = pool;
worker->id = id;
@@ -419,8 +426,9 @@ work_thread_create(struct work_pool *pool, unsigned int id)
error = cpumap_create(&cpumap);
- if (error)
+ if (error) {
goto error_cpumap;
+ }
pool_id = work_pool_cpu_id(pool);
cpumap_zero(cpumap);
@@ -432,16 +440,19 @@ work_thread_create(struct work_pool *pool, unsigned int id)
thread_attr_init(&attr, name);
thread_attr_set_priority(&attr, priority);
- if (cpumap != NULL)
+ if (cpumap != NULL) {
thread_attr_set_cpumap(&attr, cpumap);
+ }
error = thread_create(&worker->thread, &attr, work_process, worker);
- if (cpumap != NULL)
+ if (cpumap != NULL) {
cpumap_destroy(cpumap);
+ }
- if (error)
+ if (error) {
goto error_thread;
+ }
return 0;
diff --git a/kern/work.h b/kern/work.h
index fb22db90..6e3876f0 100644
--- a/kern/work.h
+++ b/kern/work.h
@@ -78,10 +78,11 @@ work_queue_push(struct work_queue *queue, struct work *work)
{
work->next = NULL;
- if (queue->last == NULL)
+ if (queue->last == NULL) {
queue->first = work;
- else
+ } else {
queue->last->next = work;
+ }
queue->last = work;
queue->nr_works++;
@@ -95,8 +96,9 @@ work_queue_pop(struct work_queue *queue)
work = queue->first;
queue->first = work->next;
- if (queue->last == work)
+ if (queue->last == work) {
queue->last = NULL;
+ }
queue->nr_works--;
return work;
@@ -111,8 +113,9 @@ work_queue_transfer(struct work_queue *dest, struct work_queue *src)
static inline void
work_queue_concat(struct work_queue *queue1, struct work_queue *queue2)
{
- if (queue2->nr_works == 0)
+ if (queue2->nr_works == 0) {
return;
+ }
if (queue1->nr_works == 0) {
*queue1 = *queue2;
diff --git a/kern/xcall.c b/kern/xcall.c
index 7ea299db..398001d1 100644
--- a/kern/xcall.c
+++ b/kern/xcall.c
@@ -101,8 +101,9 @@ xcall_setup(void)
{
unsigned int i;
- for (i = 0; i < cpu_count(); i++)
+ for (i = 0; i < cpu_count(); i++) {
xcall_cpu_data_init(percpu_ptr(xcall_cpu_data, i));
+ }
}
void
@@ -139,8 +140,9 @@ xcall_call(xcall_fn_t fn, void *arg, unsigned int cpu)
cpu_send_xcall(cpu);
- while (remote_data->recv_call != NULL)
+ while (remote_data->recv_call != NULL) {
cpu_pause();
+ }
spinlock_unlock(&remote_data->lock);
diff --git a/test/test_llsync_defer.c b/test/test_llsync_defer.c
index 911a88d3..0cbe68d5 100644
--- a/test/test_llsync_defer.c
+++ b/test/test_llsync_defer.c
@@ -70,23 +70,26 @@ test_alloc(void *arg)
mutex_lock(&test_lock);
for (;;) {
- while (test_pdsc != NULL)
+ while (test_pdsc != NULL) {
condition_wait(&test_condition, &test_lock);
+ }
pdsc = kmem_cache_alloc(&test_pdsc_cache);
if (pdsc != NULL) {
pdsc->addr = vm_kmem_alloc(PAGE_SIZE);
- if (pdsc->addr != NULL)
+ if (pdsc->addr != NULL) {
memset(pdsc->addr, TEST_VALIDATION_BYTE, PAGE_SIZE);
+ }
}
llsync_assign_ptr(test_pdsc, pdsc);
condition_signal(&test_condition);
- if ((i % 100000) == 0)
+ if ((i % 100000) == 0) {
printk("alloc ");
+ }
i++;
}
@@ -99,8 +102,9 @@ test_deferred_free(struct work *work)
pdsc = structof(work, struct test_pdsc, work);
- if (pdsc->addr != NULL)
+ if (pdsc->addr != NULL) {
vm_kmem_free(pdsc->addr, PAGE_SIZE);
+ }
kmem_cache_free(&test_pdsc_cache, pdsc);
}
@@ -118,8 +122,9 @@ test_free(void *arg)
mutex_lock(&test_lock);
for (;;) {
- while (test_pdsc == NULL)
+ while (test_pdsc == NULL) {
condition_wait(&test_condition, &test_lock);
+ }
pdsc = test_pdsc;
llsync_assign_ptr(test_pdsc, NULL);
@@ -131,8 +136,9 @@ test_free(void *arg)
condition_signal(&test_condition);
- if ((i % 100000) == 0)
+ if ((i % 100000) == 0) {
printk("free ");
+ }
i++;
}
@@ -159,11 +165,13 @@ test_read(void *arg)
if (s != NULL) {
for (j = 0; j < PAGE_SIZE; j++)
- if (s[j] != TEST_VALIDATION_BYTE)
+ if (s[j] != TEST_VALIDATION_BYTE) {
panic("invalid content");
+ }
- if ((i % 100000) == 0)
+ if ((i % 100000) == 0) {
printk("read ");
+ }
i++;
}
diff --git a/test/test_pmap_update_mp.c b/test/test_pmap_update_mp.c
index 3f7f505b..df20dd13 100644
--- a/test/test_pmap_update_mp.c
+++ b/test/test_pmap_update_mp.c
@@ -73,8 +73,9 @@ test_run2(void *arg)
mutex_lock(&test_lock);
- while (test_va == NULL)
+ while (test_va == NULL) {
condition_wait(&test_condition, &test_lock);
+ }
ptr = test_va;
@@ -82,9 +83,11 @@ test_run2(void *arg)
printk("page received (%p), checking page\n", ptr);
- for (i = 0; i < PAGE_SIZE; i++)
- if (ptr[i] != 'a')
+ for (i = 0; i < PAGE_SIZE; i++) {
+ if (ptr[i] != 'a') {
panic("invalid content");
+ }
+ }
vm_kmem_free(ptr, PAGE_SIZE);
printk("done\n");
diff --git a/test/test_sref_dirty_zeroes.c b/test/test_sref_dirty_zeroes.c
index 0c808c4f..e82d35d0 100644
--- a/test/test_sref_dirty_zeroes.c
+++ b/test/test_sref_dirty_zeroes.c
@@ -59,8 +59,9 @@ test_inc(void *arg)
test_transient_ref++;
condition_signal(&test_condition);
- while (test_transient_ref != 0)
+ while (test_transient_ref != 0) {
condition_wait(&test_condition, &test_lock);
+ }
mutex_unlock(&test_lock);
}
@@ -80,8 +81,9 @@ test_dec(void *arg)
for (;;) {
mutex_lock(&test_lock);
- while (test_transient_ref == 0)
+ while (test_transient_ref == 0) {
condition_wait(&test_condition, &test_lock);
+ }
test_transient_ref--;
condition_signal(&test_condition);
diff --git a/test/test_sref_noref.c b/test/test_sref_noref.c
index 394f4793..2bcd462e 100644
--- a/test/test_sref_noref.c
+++ b/test/test_sref_noref.c
@@ -77,8 +77,9 @@ test_ref(void *arg)
printk("waiting for page\n");
- while (test_obj == NULL)
+ while (test_obj == NULL) {
condition_wait(&test_condition, &test_lock);
+ }
obj = test_obj;
@@ -86,8 +87,9 @@ test_ref(void *arg)
printk("page received, manipulate reference counter\n");
- while (!test_stop)
+ while (!test_stop) {
test_manipulate_counter(obj);
+ }
printk("thread exiting\n");
}
@@ -121,8 +123,9 @@ test_run(void *arg)
nr_threads = cpu_count() + 1;
threads = kmem_alloc(sizeof(*threads) * nr_threads);
- if (threads == NULL)
+ if (threads == NULL) {
panic("kmem_alloc: %s", error_str(ERROR_NOMEM));
+ }
for (i = 0; i < nr_threads; i++) {
snprintf(name, sizeof(name), "x15_test_ref/%u", i);
@@ -134,8 +137,9 @@ test_run(void *arg)
printk("allocating page\n");
obj = vm_kmem_alloc(sizeof(*obj));
- if (obj == NULL)
+ if (obj == NULL) {
panic("vm_kmem_alloc: %s", error_str(ERROR_NOMEM));
+ }
sref_counter_init(&obj->ref_counter, test_obj_noref);
@@ -146,14 +150,16 @@ test_run(void *arg)
condition_broadcast(&test_condition);
mutex_unlock(&test_lock);
- for (loop = 0; loop < NR_LOOPS; loop++)
+ for (loop = 0; loop < NR_LOOPS; loop++) {
test_manipulate_counter(obj);
+ }
printk("stopping test, wait for threads\n");
test_stop = 1;
- for (i = 0; i < nr_threads; i++)
+ for (i = 0; i < nr_threads; i++) {
thread_join(threads[i]);
+ }
printk("releasing initial reference\n");
sref_counter_dec(&obj->ref_counter);
diff --git a/test/test_xcall.c b/test/test_xcall.c
index 4c95165c..262196d7 100644
--- a/test/test_xcall.c
+++ b/test/test_xcall.c
@@ -48,8 +48,9 @@ test_once(unsigned int cpu)
printk("cross-call on cpu%u:\n", cpu);
xcall_call(test_fn, NULL, cpu);
- if (!test_done)
+ if (!test_done) {
panic("test_done false");
+ }
}
static void
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 3a0f9a93..a3d95613 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -40,8 +40,9 @@ static int
vm_kmem_alloc_check(size_t size)
{
if (!vm_page_aligned(size)
- || (size == 0))
+ || (size == 0)) {
return -1;
+ }
return 0;
}
@@ -49,8 +50,9 @@ vm_kmem_alloc_check(size_t size)
static int
vm_kmem_free_check(unsigned long va, size_t size)
{
- if (!vm_page_aligned(va))
+ if (!vm_page_aligned(va)) {
return -1;
+ }
return vm_kmem_alloc_check(size);
}
@@ -68,8 +70,9 @@ vm_kmem_alloc_va(size_t size)
VM_ADV_DEFAULT, 0);
error = vm_map_enter(kernel_map, &va, size, 0, flags, NULL, 0);
- if (error)
+ if (error) {
return 0;
+ }
return (void *)va;
}
@@ -93,14 +96,16 @@ vm_kmem_alloc(size_t size)
size = vm_page_round(size);
va = (unsigned long)vm_kmem_alloc_va(size);
- if (va == 0)
+ if (va == 0) {
return 0;
+ }
for (start = va, end = va + size; start < end; start += PAGE_SIZE) {
page = vm_page_alloc(0, VM_PAGE_SEL_HIGHMEM, VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
goto error_page;
+ }
pmap_enter(kernel_pmap, start, vm_page_to_pa(page),
VM_PROT_READ | VM_PROT_WRITE, PMAP_PEF_GLOBAL);
@@ -119,8 +124,9 @@ error_page:
size = end - start;
- if (size != 0)
+ if (size != 0) {
vm_kmem_free_va((void *)start, size);
+ }
return NULL;
}
@@ -165,8 +171,9 @@ vm_kmem_map_pa(phys_addr_t pa, size_t size,
map_size = vm_page_round(pa + size) - start;
map_va = (unsigned long)vm_kmem_alloc_va(map_size);
- if (map_va == 0)
+ if (map_va == 0) {
return NULL;
+ }
for (offset = 0; offset < map_size; offset += PAGE_SIZE)
pmap_enter(kernel_pmap, map_va + offset, start + offset,
@@ -174,11 +181,13 @@ vm_kmem_map_pa(phys_addr_t pa, size_t size,
pmap_update(kernel_pmap);
- if (map_vap != NULL)
+ if (map_vap != NULL) {
*map_vap = map_va;
+ }
- if (map_sizep != NULL)
+ if (map_sizep != NULL) {
*map_sizep = map_size;
+ }
return (void *)(map_va + (unsigned long)(pa & PAGE_MASK));
}
@@ -192,8 +201,9 @@ vm_kmem_unmap_pa(unsigned long map_va, size_t map_size)
cpumap = cpumap_all();
end = map_va + map_size;
- for (va = map_va; va < end; va += PAGE_SIZE)
+ for (va = map_va; va < end; va += PAGE_SIZE) {
pmap_remove(kernel_pmap, va, cpumap);
+ }
pmap_update(kernel_pmap);
vm_kmem_free_va((void *)map_va, map_size);
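The vm_kmem hunks above only touch bracing, but they also show the allocation contract exercised by the tests: vm_kmem_alloc() returns NULL on failure and vm_kmem_free() takes the pointer and its size back. A hedged usage sketch under that assumption, written in the braced style this change enforces (the helper names and include paths are illustrative, not from the tree):

#include <kern/error.h>   /* assumed include paths for error_str()/panic() */
#include <kern/panic.h>
#include <vm/vm_kmem.h>

/* Hypothetical helper: allocate a scratch buffer or panic. */
static void *
scratch_alloc(size_t size)
{
    void *ptr;

    ptr = vm_kmem_alloc(size);

    if (ptr == NULL) {
        panic("scratch_alloc: %s", error_str(ERROR_NOMEM));
    }

    return ptr;
}

/* Hypothetical helper: release a buffer obtained from scratch_alloc(). */
static void
scratch_free(void *ptr, size_t size)
{
    if (ptr != NULL) {
        vm_kmem_free(ptr, size);
    }
}
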
diff --git a/vm/vm_map.c b/vm/vm_map.c
index a5c37ec5..2c8c31a4 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -81,8 +81,9 @@ vm_map_entry_create(void)
entry = kmem_cache_alloc(&vm_map_entry_cache);
/* TODO Handle error */
- if (entry == NULL)
+ if (entry == NULL) {
panic("vm_map: can't create map entry");
+ }
return entry;
}
@@ -100,11 +101,13 @@ vm_map_entry_cmp_lookup(unsigned long addr, const struct rbtree_node *node)
entry = rbtree_entry(node, struct vm_map_entry, tree_node);
- if (addr >= entry->end)
+ if (addr >= entry->end) {
return 1;
+ }
- if (addr >= entry->start)
+ if (addr >= entry->start) {
return 0;
+ }
return -1;
}
@@ -159,8 +162,9 @@ vm_map_lookup_nearest(struct vm_map *map, unsigned long addr)
entry = map->lookup_cache;
- if ((entry != NULL) && (addr >= entry->start) && (addr < entry->end))
+ if ((entry != NULL) && (addr >= entry->start) && (addr < entry->end)) {
return entry;
+ }
node = rbtree_lookup_nearest(&map->entry_tree, addr,
vm_map_entry_cmp_lookup, RBTREE_RIGHT);
@@ -192,21 +196,24 @@ vm_map_find_fixed(struct vm_map *map, struct vm_map_request *request)
start = request->start;
size = request->size;
- if ((start < map->start) || (start + size) > map->end)
+ if ((start < map->start) || (start + size) > map->end) {
return ERROR_NOMEM;
+ }
next = vm_map_lookup_nearest(map, start);
if (next == NULL) {
- if ((map->end - start) < size)
+ if ((map->end - start) < size) {
return ERROR_NOMEM;
+ }
request->next = NULL;
return 0;
}
- if ((start >= next->start) || ((next->start - start) < size))
+ if ((start >= next->start) || ((next->start - start) < size)) {
return ERROR_NOMEM;
+ }
request->next = next;
return 0;
@@ -225,16 +232,17 @@ vm_map_find_avail(struct vm_map *map, struct vm_map_request *request)
if (request->start != 0) {
error = vm_map_find_fixed(map, request);
- if (!error)
+ if (!error) {
return 0;
+ }
}
size = request->size;
align = request->align;
- if (size > map->find_cache_threshold)
+ if (size > map->find_cache_threshold) {
base = map->find_cache;
- else {
+ } else {
base = map->start;
/*
@@ -252,8 +260,9 @@ retry:
for (;;) {
assert(start <= map->end);
- if (align != 0)
+ if (align != 0) {
start = P2ROUND(start, align);
+ }
/*
* The end of the map has been reached, and no space could be found.
@@ -270,12 +279,13 @@ retry:
return ERROR_NOMEM;
}
- if (next == NULL)
+ if (next == NULL) {
space = map->end - start;
- else if (start >= next->start)
+ } else if (start >= next->start) {
space = 0;
- else
+ } else {
space = next->start - start;
+ }
if (space >= size) {
map->find_cache = start + size;
@@ -284,16 +294,18 @@ retry:
return 0;
}
- if (space > map->find_cache_threshold)
+ if (space > map->find_cache_threshold) {
map->find_cache_threshold = space;
+ }
start = next->end;
node = list_next(&next->list_node);
- if (list_end(&map->entry_list, node))
+ if (list_end(&map->entry_list, node)) {
next = NULL;
- else
+ } else {
next = list_entry(node, struct vm_map_entry, list_node);
+ }
}
}
@@ -304,10 +316,11 @@ vm_map_prev(struct vm_map *map, struct vm_map_entry *entry)
node = list_prev(&entry->list_node);
- if (list_end(&map->entry_list, node))
+ if (list_end(&map->entry_list, node)) {
return NULL;
- else
+ } else {
return list_entry(node, struct vm_map_entry, list_node);
+ }
}
static inline struct vm_map_entry *
@@ -317,10 +330,11 @@ vm_map_next(struct vm_map *map, struct vm_map_entry *entry)
node = list_next(&entry->list_node);
- if (list_end(&map->entry_list, node))
+ if (list_end(&map->entry_list, node)) {
return NULL;
- else
+ } else {
return list_entry(node, struct vm_map_entry, list_node);
+ }
}
static void
@@ -329,12 +343,13 @@ vm_map_link(struct vm_map *map, struct vm_map_entry *entry,
{
assert(entry->start < entry->end);
- if ((prev == NULL) && (next == NULL))
+ if ((prev == NULL) && (next == NULL)) {
list_insert_tail(&map->entry_list, &entry->list_node);
- else if (prev == NULL)
+ } else if (prev == NULL) {
list_insert_before(&next->list_node, &entry->list_node);
- else
+ } else {
list_insert_after(&prev->list_node, &entry->list_node);
+ }
rbtree_insert(&map->entry_tree, &entry->tree_node, vm_map_entry_cmp_insert);
map->nr_entries++;
@@ -345,8 +360,9 @@ vm_map_unlink(struct vm_map *map, struct vm_map_entry *entry)
{
assert(entry->start < entry->end);
- if (map->lookup_cache == entry)
+ if (map->lookup_cache == entry) {
map->lookup_cache = NULL;
+ }
list_remove(&entry->list_node);
rbtree_remove(&map->entry_tree, &entry->tree_node);
@@ -373,10 +389,11 @@ vm_map_prepare(struct vm_map *map, unsigned long start,
request->offset = offset;
vm_map_request_assert_valid(request);
- if (flags & VM_MAP_FIXED)
+ if (flags & VM_MAP_FIXED) {
error = vm_map_find_fixed(map, request);
- else
+ } else {
error = vm_map_find_avail(map, request);
+ }
return error;
}
@@ -408,11 +425,13 @@ vm_map_try_merge_prev(struct vm_map *map, const struct vm_map_request *request,
assert(entry != NULL);
- if (!vm_map_try_merge_compatible(request, entry))
+ if (!vm_map_try_merge_compatible(request, entry)) {
return NULL;
+ }
- if (entry->end != request->start)
+ if (entry->end != request->start) {
return NULL;
+ }
prev = vm_map_prev(map, entry);
next = vm_map_next(map, entry);
@@ -431,13 +450,15 @@ vm_map_try_merge_next(struct vm_map *map, const struct vm_map_request *request,
assert(entry != NULL);
- if (!vm_map_try_merge_compatible(request, entry))
+ if (!vm_map_try_merge_compatible(request, entry)) {
return NULL;
+ }
end = request->start + request->size;
- if (end != entry->start)
+ if (end != entry->start) {
return NULL;
+ }
prev = vm_map_prev(map, entry);
next = vm_map_next(map, entry);
@@ -474,8 +495,9 @@ vm_map_try_merge_near(struct vm_map *map, const struct vm_map_request *request,
entry = vm_map_try_merge_prev(map, request, first);
- if (entry != NULL)
+ if (entry != NULL) {
return entry;
+ }
return vm_map_try_merge_next(map, request, second);
}
@@ -492,18 +514,18 @@ vm_map_try_merge(struct vm_map *map, const struct vm_map_request *request)
if (request->next == NULL) {
node = list_last(&map->entry_list);
- if (list_end(&map->entry_list, node))
+ if (list_end(&map->entry_list, node)) {
entry = NULL;
- else {
+ } else {
prev = list_entry(node, struct vm_map_entry, list_node);
entry = vm_map_try_merge_prev(map, request, prev);
}
} else {
node = list_prev(&request->next->list_node);
- if (list_end(&map->entry_list, node))
+ if (list_end(&map->entry_list, node)) {
entry = vm_map_try_merge_next(map, request, request->next);
- else {
+ } else {
prev = list_entry(node, struct vm_map_entry, list_node);
entry = vm_map_try_merge_near(map, request, prev, request->next);
}
@@ -524,8 +546,9 @@ vm_map_insert(struct vm_map *map, struct vm_map_entry *entry,
if (entry == NULL) {
entry = vm_map_try_merge(map, request);
- if (entry != NULL)
+ if (entry != NULL) {
goto out;
+ }
entry = vm_map_entry_create();
}
@@ -555,13 +578,15 @@ vm_map_enter(struct vm_map *map, unsigned long *startp,
error = vm_map_prepare(map, *startp, size, align, flags, object, offset,
&request);
- if (error)
+ if (error) {
goto error_enter;
+ }
error = vm_map_insert(map, NULL, &request);
- if (error)
+ if (error) {
goto error_enter;
+ }
mutex_unlock(&map->lock);
@@ -584,8 +609,9 @@ vm_map_split_entries(struct vm_map_entry *prev, struct vm_map_entry *next,
prev->end = split_addr;
next->start = split_addr;
- if (next->object != NULL)
+ if (next->object != NULL) {
next->offset += delta;
+ }
}
static void
@@ -594,8 +620,9 @@ vm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
{
struct vm_map_entry *new_entry, *next;
- if ((start <= entry->start) || (start >= entry->end))
+ if ((start <= entry->start) || (start >= entry->end)) {
return;
+ }
next = vm_map_next(map, entry);
vm_map_unlink(map, entry);
@@ -612,8 +639,9 @@ vm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry,
{
struct vm_map_entry *new_entry, *prev;
- if ((end <= entry->start) || (end >= entry->end))
+ if ((end <= entry->start) || (end >= entry->end)) {
return;
+ }
prev = vm_map_prev(map, entry);
vm_map_unlink(map, entry);
@@ -638,8 +666,9 @@ vm_map_remove(struct vm_map *map, unsigned long start, unsigned long end)
entry = vm_map_lookup_nearest(map, start);
- if (entry == NULL)
+ if (entry == NULL) {
goto out;
+ }
vm_map_clip_start(map, entry, start);
@@ -652,8 +681,9 @@ vm_map_remove(struct vm_map *map, unsigned long start, unsigned long end)
/* TODO Defer destruction to shorten critical section */
vm_map_entry_destroy(entry);
- if (list_end(&map->entry_list, node))
+ if (list_end(&map->entry_list, node)) {
break;
+ }
entry = list_entry(node, struct vm_map_entry, list_node);
}
@@ -711,8 +741,9 @@ vm_map_create(struct vm_map **mapp)
error = pmap_create(&pmap);
- if (error)
+ if (error) {
goto error_pmap;
+ }
vm_map_init(map, pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
*mapp = map;
@@ -730,10 +761,11 @@ vm_map_info(struct vm_map *map)
struct vm_map_entry *entry;
const char *type, *name;
- if (map == kernel_map)
+ if (map == kernel_map) {
name = "kernel map";
- else
+ } else {
name = "map";
+ }
mutex_lock(&map->lock);
@@ -742,10 +774,11 @@ vm_map_info(struct vm_map *map)
"size offset flags type\n", name, map->start, map->end);
list_for_each_entry(&map->entry_list, entry, list_node) {
- if (entry->object == NULL)
+ if (entry->object == NULL) {
type = "null";
- else
+ } else {
type = "object";
+ }
printk("vm_map: %016lx %016lx %8luk %08llx %08x %s\n", entry->start,
entry->end, (entry->end - entry->start) >> 10, entry->offset,
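vm_map_enter() is the entry point exercised by vm_kmem_alloc_va() earlier in this patch: it takes the map, an in/out start address, a size, an alignment, mapping flags, the backing object and its offset, and returns 0 or an error code. A hedged sketch of a caller using the braced error check (the helper name is hypothetical, and the flags are left to the caller rather than assuming a particular VM_MAP_FLAGS() value):

/* Hypothetical caller: reserve `size` bytes of virtual address space in
 * `map`, returning 0 on failure, mirroring vm_kmem_alloc_va() above. */
static unsigned long
reserve_va(struct vm_map *map, size_t size, int flags)
{
    unsigned long va;
    int error;

    va = 0;
    error = vm_map_enter(map, &va, size, 0, flags, NULL, 0);

    if (error) {
        return 0;
    }

    return va;
}
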
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 21fdfe37..8dec59e8 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -175,8 +175,9 @@ vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
nr_pages = 1 << order;
- for (i = 0; i < nr_pages; i++)
+ for (i = 0; i < nr_pages; i++) {
page[i].type = type;
+ }
}
static void __init
@@ -218,12 +219,14 @@ vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
free_list = &seg->free_lists[i];
- if (free_list->size != 0)
+ if (free_list->size != 0) {
break;
+ }
}
- if (i == VM_PAGE_NR_FREE_LISTS)
+ if (i == VM_PAGE_NR_FREE_LISTS) {
return NULL;
+ }
page = list_first_entry(&free_list->blocks, struct vm_page, node);
vm_page_free_list_remove(free_list, page);
@@ -259,13 +262,15 @@ vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
buddy_pa = pa ^ vm_page_ptoa(1 << order);
- if ((buddy_pa < seg->start) || (buddy_pa >= seg->end))
+ if ((buddy_pa < seg->start) || (buddy_pa >= seg->end)) {
break;
+ }
buddy = &seg->pages[vm_page_atop(buddy_pa - seg->start)];
- if (buddy->order != order)
+ if (buddy->order != order) {
break;
+ }
vm_page_free_list_remove(&seg->free_lists[order], buddy);
buddy->order = VM_PAGE_ORDER_UNLISTED;
@@ -330,8 +335,9 @@ vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
for (i = 0; i < cpu_pool->transfer_size; i++) {
page = vm_page_seg_alloc_from_buddy(seg, 0);
- if (page == NULL)
+ if (page == NULL) {
break;
+ }
vm_page_cpu_pool_push(cpu_pool, page);
}
@@ -373,10 +379,11 @@ vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
- if (size == 0)
+ if (size == 0) {
size = 1;
- else if (size > VM_PAGE_CPU_POOL_MAX_SIZE)
+ } else if (size > VM_PAGE_CPU_POOL_MAX_SIZE) {
size = VM_PAGE_CPU_POOL_MAX_SIZE;
+ }
return size;
}
@@ -393,21 +400,24 @@ vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
seg->end = end;
pool_size = vm_page_seg_compute_pool_size(seg);
- for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
+ for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++) {
vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+ }
seg->pages = pages;
seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
mutex_init(&seg->lock);
- for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
+ for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++) {
vm_page_free_list_init(&seg->free_lists[i]);
+ }
seg->nr_free_pages = 0;
i = seg - vm_page_segs;
- for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
+ for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE) {
vm_page_init(&pages[vm_page_atop(pa - seg->start)], i, pa);
+ }
}
static struct vm_page *
@@ -443,8 +453,9 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
page = vm_page_seg_alloc_from_buddy(seg, order);
mutex_unlock(&seg->lock);
- if (page == NULL)
+ if (page == NULL) {
return NULL;
+ }
}
assert(page->type == VM_PAGE_FREE);
@@ -468,8 +479,9 @@ vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
cpu_pool = vm_page_cpu_pool_get(seg);
mutex_lock(&cpu_pool->lock);
- if (cpu_pool->nr_pages == cpu_pool->size)
+ if (cpu_pool->nr_pages == cpu_pool->size) {
vm_page_cpu_pool_drain(cpu_pool, seg);
+ }
vm_page_cpu_pool_push(cpu_pool, page);
mutex_unlock(&cpu_pool->lock);
@@ -574,14 +586,16 @@ vm_page_check_boot_segs(void)
unsigned int i;
int expect_loaded;
- if (vm_page_segs_size == 0)
+ if (vm_page_segs_size == 0) {
panic("vm_page: no physical memory loaded");
+ }
for (i = 0; i < ARRAY_SIZE(vm_page_boot_segs); i++) {
expect_loaded = (i < vm_page_segs_size);
- if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded)
+ if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded) {
continue;
+ }
panic("vm_page: invalid boot segment table");
}
@@ -643,8 +657,9 @@ vm_page_setup(void)
*/
nr_pages = 0;
- for (i = 0; i < vm_page_segs_size; i++)
+ for (i = 0; i < vm_page_segs_size; i++) {
nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+ }
table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
printk("vm_page: page table size: %zu entries (%zuk)\n", nr_pages,
@@ -705,8 +720,9 @@ vm_page_lookup(phys_addr_t pa)
for (i = 0; i < vm_page_segs_size; i++) {
seg = &vm_page_segs[i];
- if ((pa >= seg->start) && (pa < seg->end))
+ if ((pa >= seg->start) && (pa < seg->end)) {
return &seg->pages[vm_page_atop(pa - seg->start)];
+ }
}
return NULL;
@@ -721,12 +737,14 @@ vm_page_alloc(unsigned int order, unsigned int selector, unsigned short type)
for (i = vm_page_select_alloc_seg(selector); i < vm_page_segs_size; i--) {
page = vm_page_seg_alloc(&vm_page_segs[i], order, type);
- if (page != NULL)
+ if (page != NULL) {
return page;
+ }
}
- if (type == VM_PAGE_PMAP)
+ if (type == VM_PAGE_PMAP) {
panic("vm_page: unable to allocate pmap page");
+ }
return NULL;
}
@@ -743,16 +761,17 @@ const char *
vm_page_seg_name(unsigned int seg_index)
{
/* Don't use a switch statement since segments can be aliased */
- if (seg_index == VM_PAGE_SEG_HIGHMEM)
+ if (seg_index == VM_PAGE_SEG_HIGHMEM) {
return "HIGHMEM";
- else if (seg_index == VM_PAGE_SEG_DIRECTMAP)
+ } else if (seg_index == VM_PAGE_SEG_DIRECTMAP) {
return "DIRECTMAP";
- else if (seg_index == VM_PAGE_SEG_DMA32)
+ } else if (seg_index == VM_PAGE_SEG_DMA32) {
return "DMA32";
- else if (seg_index == VM_PAGE_SEG_DMA)
+ } else if (seg_index == VM_PAGE_SEG_DMA) {
return "DMA";
- else
+ } else {
panic("vm_page: invalid segment index");
+ }
}
void