summaryrefslogtreecommitdiff
path: root/arch/x86/machine
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/machine')
-rw-r--r--arch/x86/machine/acpimp.c66
-rw-r--r--arch/x86/machine/biosmem.c80
-rw-r--r--arch/x86/machine/boot.c65
-rw-r--r--arch/x86/machine/cga.c10
-rw-r--r--arch/x86/machine/cpu.c43
-rw-r--r--arch/x86/machine/cpu.h11
-rw-r--r--arch/x86/machine/lapic.c9
-rw-r--r--arch/x86/machine/pic.c9
-rw-r--r--arch/x86/machine/pit.c3
-rw-r--r--arch/x86/machine/pmap.c159
-rw-r--r--arch/x86/machine/strace.c28
-rw-r--r--arch/x86/machine/string.c3
-rw-r--r--arch/x86/machine/tcb.c3
-rw-r--r--arch/x86/machine/trap.c9
14 files changed, 322 insertions, 176 deletions
diff --git a/arch/x86/machine/acpimp.c b/arch/x86/machine/acpimp.c
index b8d9cd26..b294da92 100644
--- a/arch/x86/machine/acpimp.c
+++ b/arch/x86/machine/acpimp.c
@@ -140,8 +140,9 @@ acpimp_table_required(const struct acpimp_sdth *table)
acpimp_table_sig(table, sig);
for (i = 0; i < ARRAY_SIZE(acpimp_table_addrs); i++)
- if (strcmp(sig, acpimp_table_addrs[i].sig) == 0)
+ if (strcmp(sig, acpimp_table_addrs[i].sig) == 0) {
return 1;
+ }
return 0;
}
@@ -175,8 +176,9 @@ acpimp_lookup_table(const char *sig)
size_t i;
for (i = 0; i < ARRAY_SIZE(acpimp_table_addrs); i++)
- if (strcmp(sig, acpimp_table_addrs[i].sig) == 0)
+ if (strcmp(sig, acpimp_table_addrs[i].sig) == 0) {
return acpimp_table_addrs[i].table;
+ }
return NULL;
}
@@ -205,8 +207,9 @@ acpimp_free_tables(void)
for (i = 0; i < ARRAY_SIZE(acpimp_table_addrs); i++) {
table = acpimp_table_addrs[i].table;
- if (table != NULL)
+ if (table != NULL) {
kmem_free(table, table->length);
+ }
}
}
@@ -220,8 +223,9 @@ acpimp_checksum(const void *ptr, size_t size)
bytes = ptr;
checksum = 0;
- for (i = 0; i < size; i++)
+ for (i = 0; i < size; i++) {
checksum += bytes[i];
+ }
return checksum;
}
@@ -231,13 +235,15 @@ acpimp_check_rsdp(const struct acpimp_rsdp *rsdp)
{
unsigned int checksum;
- if (memcmp(rsdp->signature, ACPIMP_RSDP_SIG, sizeof(rsdp->signature)) != 0)
+ if (memcmp(rsdp->signature, ACPIMP_RSDP_SIG, sizeof(rsdp->signature)) != 0) {
return -1;
+ }
checksum = acpimp_checksum(rsdp, sizeof(*rsdp));
- if (checksum != 0)
+ if (checksum != 0) {
return -1;
+ }
return 0;
}
@@ -253,20 +259,23 @@ acpimp_get_rsdp(phys_addr_t start, size_t size, struct acpimp_rsdp *rsdp)
assert(size > 0);
assert(P2ALIGNED(size, ACPIMP_RSDP_ALIGN));
- if (!P2ALIGNED(start, ACPIMP_RSDP_ALIGN))
+ if (!P2ALIGNED(start, ACPIMP_RSDP_ALIGN)) {
return -1;
+ }
addr = (unsigned long)vm_kmem_map_pa(start, size, &map_addr, &map_size);
- if (addr == 0)
+ if (addr == 0) {
panic("acpimp: unable to map bios memory in kernel map");
+ }
for (end = addr + size; addr < end; addr += ACPIMP_RSDP_ALIGN) {
src = (const struct acpimp_rsdp *)addr;
error = acpimp_check_rsdp(src);
- if (!error)
+ if (!error) {
break;
+ }
}
if (!(addr < end)) {
@@ -292,8 +301,9 @@ acpimp_find_rsdp(struct acpimp_rsdp *rsdp)
ptr = vm_kmem_map_pa(BIOSMEM_EBDA_PTR, sizeof(*ptr), &map_addr, &map_size);
- if (ptr == NULL)
+ if (ptr == NULL) {
panic("acpimp: unable to map ebda pointer in kernel map");
+ }
base = *((const volatile uint16_t *)ptr);
vm_kmem_unmap_pa(map_addr, map_size);
@@ -302,15 +312,17 @@ acpimp_find_rsdp(struct acpimp_rsdp *rsdp)
base <<= 4;
error = acpimp_get_rsdp(base, 1024, rsdp);
- if (!error)
+ if (!error) {
return 0;
+ }
}
error = acpimp_get_rsdp(BIOSMEM_EXT_ROM, BIOSMEM_END - BIOSMEM_EXT_ROM,
rsdp);
- if (!error)
+ if (!error) {
return 0;
+ }
printk("acpimp: unable to find root system description pointer\n");
return -1;
@@ -338,8 +350,9 @@ acpimp_copy_table(uint32_t addr)
table = vm_kmem_map_pa(addr, sizeof(*table), &map_addr, &map_size);
- if (table == NULL)
+ if (table == NULL) {
panic("acpimp: unable to map acpi data in kernel map");
+ }
if (!acpimp_table_required(table)) {
copy = NULL;
@@ -351,8 +364,9 @@ acpimp_copy_table(uint32_t addr)
table = vm_kmem_map_pa(addr, size, &map_addr, &map_size);
- if (table == NULL)
+ if (table == NULL) {
panic("acpimp: unable to map acpi data in kernel map");
+ }
checksum = acpimp_checksum(table, size);
@@ -367,8 +381,9 @@ acpimp_copy_table(uint32_t addr)
copy = kmem_alloc(size);
- if (copy == NULL)
+ if (copy == NULL) {
panic("acpimp: unable to allocate memory for acpi data copy");
+ }
memcpy(copy, table, size);
@@ -387,8 +402,9 @@ acpimp_copy_tables(const struct acpimp_rsdp *rsdp)
table = acpimp_copy_table(rsdp->rsdt_address);
- if (table == NULL)
+ if (table == NULL) {
return -1;
+ }
acpimp_register_table(table);
@@ -398,16 +414,18 @@ acpimp_copy_tables(const struct acpimp_rsdp *rsdp)
for (addr = rsdt->entries; addr < end; addr++) {
table = acpimp_copy_table(*addr);
- if (table == NULL)
+ if (table == NULL) {
continue;
+ }
acpimp_register_table(table);
}
error = acpimp_check_tables();
- if (error)
+ if (error) {
goto error;
+ }
return 0;
@@ -439,8 +457,9 @@ acpimp_madt_iter_next(struct acpimp_madt_iter *iter)
static void __init
acpimp_load_lapic(const struct acpimp_madt_entry_lapic *lapic, int *is_bsp)
{
- if (!(lapic->flags & ACPIMP_MADT_LAPIC_ENABLED))
+ if (!(lapic->flags & ACPIMP_MADT_LAPIC_ENABLED)) {
return;
+ }
cpu_mp_register_lapic(lapic->apic_id, *is_bsp);
*is_bsp = 0;
@@ -460,12 +479,13 @@ acpimp_load_madt(void)
lapic_setup(madt->lapic_addr);
is_bsp = 1;
- acpimp_madt_foreach(madt, &iter)
+ acpimp_madt_foreach(madt, &iter) {
switch (iter.entry->type) {
case ACPIMP_MADT_ENTRY_LAPIC:
acpimp_load_lapic(&iter.entry->lapic, &is_bsp);
break;
}
+ }
}
int __init
@@ -476,13 +496,15 @@ acpimp_setup(void)
error = acpimp_find_rsdp(&rsdp);
- if (error)
+ if (error) {
return error;
+ }
error = acpimp_copy_tables(&rsdp);
- if (error)
+ if (error) {
return error;
+ }
acpimp_info();
acpimp_load_madt();
diff --git a/arch/x86/machine/biosmem.c b/arch/x86/machine/biosmem.c
index 0db7bfde..dd55b04b 100644
--- a/arch/x86/machine/biosmem.c
+++ b/arch/x86/machine/biosmem.c
@@ -156,7 +156,7 @@ biosmem_register_boot_data(phys_addr_t start, phys_addr_t end, bool temporary)
for (i = 0; i < biosmem_nr_boot_data; i++) {
/* Check if the new range overlaps */
if ((end > biosmem_boot_data_array[i].start)
- && (start < biosmem_boot_data_array[i].end)) {
+ && (start < biosmem_boot_data_array[i].end)) {
/*
* If it does, check whether it's part of another range.
@@ -321,8 +321,9 @@ biosmem_map_sort(void)
tmp = biosmem_map[i];
for (j = i - 1; j < i; j--) {
- if (biosmem_map[j].base_addr < tmp.base_addr)
+ if (biosmem_map[j].base_addr < tmp.base_addr) {
break;
+ }
biosmem_map[j + 1] = biosmem_map[j];
}
@@ -402,19 +403,20 @@ biosmem_map_adjust(void)
continue;
}
- if (tmp.type == a->type)
+ if (tmp.type == a->type) {
first = a;
- else if (tmp.type == b->type)
+ } else if (tmp.type == b->type) {
first = b;
- else {
+ } else {
/*
* If the overlapping area can't be merged with one of its
* neighbors, it must be added as a new entry.
*/
- if (biosmem_map_size >= ARRAY_SIZE(biosmem_map))
+ if (biosmem_map_size >= ARRAY_SIZE(biosmem_map)) {
boot_panic(biosmem_panic_too_big_msg);
+ }
biosmem_map[biosmem_map_size] = tmp;
biosmem_map_size++;
@@ -422,8 +424,9 @@ biosmem_map_adjust(void)
continue;
}
- if (first->base_addr > tmp.base_addr)
+ if (first->base_addr > tmp.base_addr) {
first->base_addr = tmp.base_addr;
+ }
first->length += tmp.length;
j++;
@@ -455,32 +458,38 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
map_end = biosmem_map + biosmem_map_size;
for (entry = biosmem_map; entry < map_end; entry++) {
- if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE) {
continue;
+ }
start = vm_page_round(entry->base_addr);
- if (start >= *phys_end)
+ if (start >= *phys_end) {
break;
+ }
end = vm_page_trunc(entry->base_addr + entry->length);
if ((start < end) && (start < *phys_end) && (end > *phys_start)) {
- if (seg_start == (phys_addr_t)-1)
+ if (seg_start == (phys_addr_t)-1) {
seg_start = start;
+ }
seg_end = end;
}
}
- if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1))
+ if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1)) {
return -1;
+ }
- if (seg_start > *phys_start)
+ if (seg_start > *phys_start) {
*phys_start = seg_start;
+ }
- if (seg_end < *phys_end)
+ if (seg_end < *phys_end) {
*phys_end = seg_end;
+ }
return 0;
}
@@ -594,8 +603,9 @@ biosmem_setup_allocator(const struct multiboot_raw_info *mbi)
end = vm_page_trunc((mbi->mem_upper + 1024) << 10);
#ifndef __LP64__
- if (end > VM_PAGE_DIRECTMAP_LIMIT)
+ if (end > VM_PAGE_DIRECTMAP_LIMIT) {
end = VM_PAGE_DIRECTMAP_LIMIT;
+ }
#endif /* __LP64__ */
max_heap_start = 0;
@@ -617,8 +627,9 @@ biosmem_setup_allocator(const struct multiboot_raw_info *mbi)
start = heap_end;
}
- if (max_heap_start >= max_heap_end)
+ if (max_heap_start >= max_heap_end) {
boot_panic(biosmem_panic_setup_msg);
+ }
biosmem_heap_start = max_heap_start;
biosmem_heap_end = max_heap_end;
@@ -636,10 +647,11 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_addr_t phys_start, phys_end;
int error;
- if (mbi->flags & MULTIBOOT_LOADER_MMAP)
+ if (mbi->flags & MULTIBOOT_LOADER_MMAP) {
biosmem_map_build(mbi);
- else
+ } else {
biosmem_map_build_simple(mbi);
+ }
biosmem_map_adjust();
@@ -647,8 +659,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_DMA_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
boot_panic(biosmem_panic_noseg_msg);
+ }
biosmem_set_segment(VM_PAGE_SEG_DMA, phys_start, phys_end);
@@ -657,8 +670,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_DMA32_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
goto out;
+ }
biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
@@ -667,8 +681,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_DIRECTMAP_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
goto out;
+ }
biosmem_set_segment(VM_PAGE_SEG_DIRECTMAP, phys_start, phys_end);
@@ -676,8 +691,9 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
phys_end = VM_PAGE_HIGHMEM_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
- if (error)
+ if (error) {
goto out;
+ }
biosmem_set_segment(VM_PAGE_SEG_HIGHMEM, phys_start, phys_end);
@@ -692,8 +708,9 @@ biosmem_bootalloc(unsigned int nr_pages)
size = vm_page_ptoa(nr_pages);
- if (size == 0)
+ if (size == 0) {
boot_panic(biosmem_panic_inval_msg);
+ }
if (biosmem_heap_topdown) {
addr = biosmem_heap_top - size;
@@ -722,12 +739,13 @@ biosmem_bootalloc(unsigned int nr_pages)
phys_addr_t __boot
biosmem_directmap_end(void)
{
- if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0)
+ if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0) {
return biosmem_segment_end(VM_PAGE_SEG_DIRECTMAP);
- else if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0)
+ } else if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0) {
return biosmem_segment_end(VM_PAGE_SEG_DMA32);
- else
+ } else {
return biosmem_segment_end(VM_PAGE_SEG_DMA);
+ }
}
#if DEBUG
@@ -836,8 +854,9 @@ biosmem_setup(void)
: (uint64_t)1 << cpu->phys_addr_width;
for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
- if (biosmem_segment_size(i) == 0)
+ if (biosmem_segment_size(i) == 0) {
break;
+ }
seg = &biosmem_segments[i];
biosmem_load_segment(seg, max_phys_end);
@@ -911,13 +930,15 @@ biosmem_free_usable(void)
for (i = 0; i < biosmem_map_size; i++) {
entry = &biosmem_map[i];
- if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE) {
continue;
+ }
start = vm_page_round(entry->base_addr);
- if (start >= VM_PAGE_HIGHMEM_LIMIT)
+ if (start >= VM_PAGE_HIGHMEM_LIMIT) {
break;
+ }
end = vm_page_trunc(entry->base_addr + entry->length);
@@ -925,8 +946,9 @@ biosmem_free_usable(void)
end = VM_PAGE_HIGHMEM_LIMIT;
}
- if (start < BIOSMEM_BASE)
+ if (start < BIOSMEM_BASE) {
start = BIOSMEM_BASE;
+ }
if (start >= end) {
continue;
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 4b1b24f8..d5359755 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -107,14 +107,16 @@ boot_memmove(void *dest, const void *src, size_t n)
dest_ptr = dest;
src_ptr = src;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr++ = *src_ptr++;
+ }
} else {
dest_ptr = dest + n - 1;
src_ptr = src + n - 1;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
*dest_ptr-- = *src_ptr--;
+ }
}
return dest;
@@ -128,8 +130,9 @@ boot_memset(void *s, int c, size_t n)
buffer = s;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++) {
buffer[i] = c;
+ }
return s;
}
@@ -141,8 +144,9 @@ boot_strlen(const char *s)
i = 0;
- while (*s++ != '\0')
+ while (*s++ != '\0') {
i++;
+ }
return i;
}
@@ -158,16 +162,19 @@ boot_panic(const char *msg)
s = boot_panic_intro_msg;
- while ((ptr < end) && (*s != '\0'))
+ while ((ptr < end) && (*s != '\0')) {
*ptr++ = (BOOT_CGACOLOR << 8) | *s++;
+ }
s = msg;
- while ((ptr < end) && (*s != '\0'))
+ while ((ptr < end) && (*s != '\0')) {
*ptr++ = (BOOT_CGACOLOR << 8) | *s++;
+ }
- while (ptr < end)
+ while (ptr < end) {
*ptr++ = (BOOT_CGACOLOR << 8) | ' ';
+ }
cpu_halt();
@@ -180,8 +187,9 @@ boot_save_cmdline_sizes(struct multiboot_raw_info *mbi)
struct multiboot_raw_module *mod;
uint32_t i;
- if (mbi->flags & MULTIBOOT_LOADER_CMDLINE)
+ if (mbi->flags & MULTIBOOT_LOADER_CMDLINE) {
mbi->unused0 = boot_strlen((char *)(unsigned long)mbi->cmdline) + 1;
+ }
if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
unsigned long addr;
@@ -237,8 +245,9 @@ boot_register_data(const struct multiboot_raw_info *mbi)
shdr = (struct elf_shdr *)(tmp + (i * mbi->shdr_size));
if ((shdr->type != ELF_SHT_SYMTAB)
- && (shdr->type != ELF_SHT_STRTAB))
+ && (shdr->type != ELF_SHT_STRTAB)) {
continue;
+ }
biosmem_register_boot_data(shdr->addr, shdr->addr + shdr->size, true);
}
@@ -248,11 +257,13 @@ boot_register_data(const struct multiboot_raw_info *mbi)
pmap_pte_t * __boot
boot_setup_paging(struct multiboot_raw_info *mbi, unsigned long eax)
{
- if (eax != MULTIBOOT_LOADER_MAGIC)
+ if (eax != MULTIBOOT_LOADER_MAGIC) {
boot_panic(boot_panic_loader_msg);
+ }
- if (!(mbi->flags & MULTIBOOT_LOADER_MEMORY))
+ if (!(mbi->flags & MULTIBOOT_LOADER_MEMORY)) {
boot_panic(boot_panic_meminfo_msg);
+ }
/*
* Save the multiboot data passed by the boot loader, initialize the
@@ -260,8 +271,9 @@ boot_setup_paging(struct multiboot_raw_info *mbi, unsigned long eax)
*/
boot_memmove(&boot_raw_mbi, mbi, sizeof(boot_raw_mbi));
- if ((mbi->flags & MULTIBOOT_LOADER_MODULES) && (mbi->mods_count == 0))
+ if ((mbi->flags & MULTIBOOT_LOADER_MODULES) && (mbi->mods_count == 0)) {
boot_raw_mbi.flags &= ~MULTIBOOT_LOADER_MODULES;
+ }
/*
* The kernel and modules command lines will be memory mapped later
@@ -298,13 +310,15 @@ boot_save_memory(uint32_t addr, size_t size)
*/
src = vm_kmem_map_pa(addr, size, &map_addr, &map_size);
- if (src == NULL)
+ if (src == NULL) {
panic("boot: unable to map boot data in kernel map");
+ }
copy = kmem_alloc(size);
- if (copy == NULL)
+ if (copy == NULL) {
panic("boot: unable to allocate memory for boot data copy");
+ }
memcpy(copy, src, size);
vm_kmem_unmap_pa(map_addr, map_size);
@@ -323,13 +337,15 @@ boot_save_mod(struct multiboot_module *dest_mod,
size = src_mod->mod_end - src_mod->mod_start;
src = vm_kmem_map_pa(src_mod->mod_start, size, &map_addr, &map_size);
- if (src == NULL)
+ if (src == NULL) {
panic("boot: unable to map module in kernel map");
+ }
copy = kmem_alloc(size);
- if (copy == NULL)
+ if (copy == NULL) {
panic("boot: unable to allocate memory for module copy");
+ }
memcpy(copy, src, size);
vm_kmem_unmap_pa(map_addr, map_size);
@@ -337,10 +353,11 @@ boot_save_mod(struct multiboot_module *dest_mod,
dest_mod->mod_start = copy;
dest_mod->mod_end = copy + size;
- if (src_mod->string == 0)
+ if (src_mod->string == 0) {
dest_mod->string = NULL;
- else
+ } else {
dest_mod->string = boot_save_memory(src_mod->string, src_mod->reserved);
+ }
}
static void __init
@@ -361,17 +378,20 @@ boot_save_mods(void)
size = boot_raw_mbi.mods_count * sizeof(struct multiboot_raw_module);
src = vm_kmem_map_pa(boot_raw_mbi.mods_addr, size, &map_addr, &map_size);
- if (src == NULL)
+ if (src == NULL) {
panic("boot: unable to map module table in kernel map");
+ }
size = boot_raw_mbi.mods_count * sizeof(struct multiboot_module);
dest = kmem_alloc(size);
- if (dest == NULL)
+ if (dest == NULL) {
panic("boot: unable to allocate memory for the module table");
+ }
- for (i = 0; i < boot_raw_mbi.mods_count; i++)
+ for (i = 0; i < boot_raw_mbi.mods_count; i++) {
boot_save_mod(&dest[i], &src[i]);
+ }
vm_kmem_unmap_pa(map_addr, map_size);
@@ -395,8 +415,9 @@ boot_save_data(void)
- if (boot_mbi.flags & MULTIBOOT_LOADER_CMDLINE)
+ if (boot_mbi.flags & MULTIBOOT_LOADER_CMDLINE) {
boot_mbi.cmdline = boot_save_memory(boot_raw_mbi.cmdline,
boot_raw_mbi.unused0);
- else
+ } else {
boot_mbi.cmdline = NULL;
+ }
boot_save_mods();
strace_setup(&boot_raw_mbi);
diff --git a/arch/x86/machine/cga.c b/arch/x86/machine/cga.c
index 07d4eb5e..1ab23582 100644
--- a/arch/x86/machine/cga.c
+++ b/arch/x86/machine/cga.c
@@ -137,16 +137,17 @@ cga_scroll_lines(void)
CGA_MEMORY_SIZE - (CGA_COLUMNS * 2));
last_line = (uint16_t *)cga_memory + (CGA_COLUMNS * (CGA_LINES - 1));
- for(i = 0; i < CGA_COLUMNS; i++)
+ for(i = 0; i < CGA_COLUMNS; i++) {
last_line[i] = CGA_BLANK;
+ }
}
void
cga_write_byte(uint8_t byte)
{
- if (byte == '\r')
+ if (byte == '\r') {
return;
- else if (byte == '\n') {
+ } else if (byte == '\n') {
cga_cursor += CGA_COLUMNS - cga_get_cursor_column();
if (cga_cursor >= (CGA_LINES * CGA_COLUMNS)) {
@@ -164,8 +165,9 @@ cga_write_byte(uint8_t byte)
} else if (byte == '\t') {
int i;
- for(i = 0; i < CGA_TABULATION_SPACES; i++)
+ for(i = 0; i < CGA_TABULATION_SPACES; i++) {
cga_write_byte(' ');
+ }
} else {
if ((cga_cursor + 1) >= CGA_COLUMNS * CGA_LINES) {
cga_scroll_lines();
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index 54e299dc..0f9ad343 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -242,7 +242,7 @@ cpu_seg_set_tss(char *table, unsigned int selector, struct cpu_tss *tss)
| CPU_DESC_PRESENT | CPU_DESC_TYPE_TSS
| ((base & CPU_DESC_SEG_BASE_MID_MASK) >> 16);
desc->word1 = ((base & CPU_DESC_SEG_BASE_LOW_MASK) << 16)
- | (limit & CPU_DESC_SEG_LIMIT_LOW_MASK);
+ | (limit & CPU_DESC_SEG_LIMIT_LOW_MASK);
}
/*
@@ -438,13 +438,15 @@ cpu_init(struct cpu *cpu)
cpu->type = (eax & CPU_TYPE_MASK) >> CPU_TYPE_SHIFT;
cpu->family = (eax & CPU_FAMILY_MASK) >> CPU_FAMILY_SHIFT;
- if (cpu->family == 0xf)
+ if (cpu->family == 0xf) {
cpu->family += (eax & CPU_EXTFAMILY_MASK) >> CPU_EXTFAMILY_SHIFT;
+ }
cpu->model = (eax & CPU_MODEL_MASK) >> CPU_MODEL_SHIFT;
- if ((cpu->model == 6) || (cpu->model == 0xf))
+ if ((cpu->model == 6) || (cpu->model == 0xf)) {
cpu->model += (eax & CPU_EXTMODEL_MASK) >> CPU_EXTMODEL_SHIFT;
+ }
cpu->stepping = (eax & CPU_STEPPING_MASK) >> CPU_STEPPING_SHIFT;
cpu->clflush_size = ((ebx & CPU_CLFLUSH_MASK) >> CPU_CLFLUSH_SHIFT) * 8;
@@ -455,10 +457,11 @@ cpu_init(struct cpu *cpu)
eax = 0x80000000;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax <= 0x80000000)
+ if (eax <= 0x80000000) {
max_extended = 0;
- else
+ } else {
max_extended = eax;
+ }
if (max_extended < 0x80000001) {
cpu->features3 = 0;
@@ -526,12 +529,14 @@ cpu_panic_on_missing_feature(const char *feature)
void __init
cpu_check(const struct cpu *cpu)
{
- if (!(cpu->features2 & CPU_FEATURE2_FPU))
+ if (!(cpu->features2 & CPU_FEATURE2_FPU)) {
cpu_panic_on_missing_feature("fpu");
+ }
/* TODO: support UP with legacy PIC machines */
- if (!(cpu->features2 & CPU_FEATURE2_APIC))
+ if (!(cpu->features2 & CPU_FEATURE2_APIC)) {
cpu_panic_on_missing_feature("apic");
+ }
}
void
@@ -541,8 +546,9 @@ cpu_info(const struct cpu *cpu)
cpu->id, cpu->vendor_id, cpu->type, cpu->family, cpu->model,
cpu->stepping);
- if (strlen(cpu->model_name) > 0)
+ if (strlen(cpu->model_name) > 0) {
printk("cpu%u: %s\n", cpu->id, cpu->model_name);
+ }
if ((cpu->phys_addr_width != 0) && (cpu->virt_addr_width != 0))
printk("cpu%u: address widths: physical: %hu, virtual: %hu\n",
@@ -558,8 +564,9 @@ cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
if (is_bsp) {
cpu = percpu_ptr(cpu_desc, 0);
- if (cpu->apic_id != CPU_INVALID_APIC_ID)
+ if (cpu->apic_id != CPU_INVALID_APIC_ID) {
panic("cpu: another processor pretends to be the BSP");
+ }
cpu->apic_id = apic_id;
return;
@@ -567,8 +574,9 @@ cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
error = percpu_add(cpu_nr_active);
- if (error)
+ if (error) {
return;
+ }
cpu = percpu_ptr(cpu_desc, cpu_nr_active);
cpu_preinit(cpu, cpu_nr_active, apic_id);
@@ -583,8 +591,9 @@ cpu_mp_probe(void)
error = acpimp_setup();
/* TODO Support UP with legacy PIC */
- if (error)
+ if (error) {
panic("cpu: ACPI required to initialize local APIC");
+ }
printk("cpu: %u processor(s) configured\n", cpu_count());
}
@@ -625,15 +634,17 @@ cpu_mp_setup(void)
page = vm_page_alloc(vm_page_order(STACK_SIZE), VM_PAGE_SEL_DIRECTMAP,
VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
panic("cpu: unable to allocate boot stack for cpu%u", i);
+ }
cpu->boot_stack = vm_page_direct_ptr(page);
page = vm_page_alloc(vm_page_order(STACK_SIZE), VM_PAGE_SEL_DIRECTMAP,
VM_PAGE_KERNEL);
- if (page == NULL)
+ if (page == NULL) {
panic("cpu: unable to allocate double fault stack for cpu%u", i);
+ }
cpu->double_fault_stack = vm_page_direct_ptr(page);
}
@@ -658,8 +669,9 @@ cpu_mp_setup(void)
lapic_ipi_startup(cpu->apic_id, BOOT_MP_TRAMPOLINE_ADDR >> 12);
cpu_delay(200);
- while (cpu->state == CPU_STATE_OFF)
+ while (cpu->state == CPU_STATE_OFF) {
cpu_pause();
+ }
}
}
@@ -683,8 +695,9 @@ cpu_halt_broadcast(void)
nr_cpus = cpu_count();
- if (nr_cpus == 1)
+ if (nr_cpus == 1) {
return;
+ }
lapic_ipi_broadcast(TRAP_CPU_HALT);
}
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index c4187d32..a18712c8 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -337,8 +337,9 @@ cpu_halt(void)
{
cpu_intr_disable();
- for (;;)
+ for (;;) {
asm volatile("hlt" : : : "memory");
+ }
}
/*
@@ -495,16 +496,16 @@ cpu_tlb_flush(void)
static __always_inline void
cpu_tlb_flush_all(void)
{
- if (!cpu_has_global_pages())
+ if (!cpu_has_global_pages()) {
cpu_tlb_flush();
- else {
+ } else {
unsigned long cr4;
cr4 = cpu_get_cr4();
- if (!(cr4 & CPU_CR4_PGE))
+ if (!(cr4 & CPU_CR4_PGE)) {
cpu_tlb_flush();
- else {
+ } else {
cr4 &= ~CPU_CR4_PGE;
cpu_set_cr4(cr4);
cr4 |= CPU_CR4_PGE;
diff --git a/arch/x86/machine/lapic.c b/arch/x86/machine/lapic.c
index 994aef62..26b0dd50 100644
--- a/arch/x86/machine/lapic.c
+++ b/arch/x86/machine/lapic.c
@@ -247,13 +247,15 @@ lapic_setup(uint32_t map_addr)
lapic_map = vm_kmem_map_pa(map_addr, sizeof(*lapic_map), NULL, NULL);
- if (lapic_map == NULL)
+ if (lapic_map == NULL) {
panic("lapic: unable to map registers in kernel map");
+ }
value = lapic_read(&lapic_map->version);
- if ((value & LAPIC_VERSION_MASK) != LAPIC_VERSION_MASK)
+ if ((value & LAPIC_VERSION_MASK) != LAPIC_VERSION_MASK) {
panic("lapic: external local APIC not supported");
+ }
lapic_setup_registers();
lapic_setup_timer();
@@ -268,8 +270,9 @@ lapic_ap_setup(void)
static void
lapic_ipi(uint32_t apic_id, uint32_t icr)
{
- if ((icr & LAPIC_ICR_DEST_MASK) == 0)
+ if ((icr & LAPIC_ICR_DEST_MASK) == 0) {
lapic_write(&lapic_map->icr_high, apic_id << LAPIC_DEST_SHIFT);
+ }
lapic_write(&lapic_map->icr_low, icr & ~LAPIC_ICR_RESERVED);
}
diff --git a/arch/x86/machine/pic.c b/arch/x86/machine/pic.c
index e0ed1e31..440ca3a7 100644
--- a/arch/x86/machine/pic.c
+++ b/arch/x86/machine/pic.c
@@ -75,8 +75,9 @@ pic_setup(void)
static void
pic_eoi(unsigned long intr)
{
- if (intr >= PIC_NR_INTRS)
+ if (intr >= PIC_NR_INTRS) {
io_write_byte(PIC_SLAVE_CMD, PIC_EOI);
+ }
io_write_byte(PIC_MASTER_CMD, PIC_EOI);
}
@@ -101,13 +102,15 @@ pic_spurious_intr(struct trap_frame *frame)
if (intr == PIC_SPURIOUS_INTR) {
isr = pic_read_isr(PIC_MASTER_CMD);
- if (isr & (1 << PIC_SPURIOUS_INTR))
+ if (isr & (1 << PIC_SPURIOUS_INTR)) {
panic("pic: real interrupt %lu", intr);
+ }
} else {
isr = pic_read_isr(PIC_SLAVE_CMD);
- if (isr & (1 << PIC_SPURIOUS_INTR))
+ if (isr & (1 << PIC_SPURIOUS_INTR)) {
panic("pic: real interrupt %lu", intr);
+ }
pic_eoi(PIC_SLAVE_INTR);
}
diff --git a/arch/x86/machine/pit.c b/arch/x86/machine/pit.c
index ea7a1a58..d3fece51 100644
--- a/arch/x86/machine/pit.c
+++ b/arch/x86/machine/pit.c
@@ -80,8 +80,9 @@ pit_delay(unsigned long usecs)
diff = prev - count;
prev = count;
- if (diff < 0)
+ if (diff < 0) {
diff += PIT_MAX_COUNT;
+ }
total -= diff;
} while (total > 0);
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index aef17410..193c1aa5 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -300,8 +300,9 @@ pmap_boot_enter(pmap_pte_t *root_ptp, unsigned long va, phys_addr_t pa,
pmap_pte_t *pt, *ptp, *pte, bits;
unsigned int level, last_level;
- if (pa != (pa & PMAP_PA_MASK))
+ if (pa != (pa & PMAP_PA_MASK)) {
boot_panic(pmap_panic_inval_msg);
+ }
switch (pgsize) {
#ifdef __LP64__
@@ -326,9 +327,9 @@ pmap_boot_enter(pmap_pte_t *root_ptp, unsigned long va, phys_addr_t pa,
pt_level = &pt_levels[level];
pte = &pt[pmap_pte_index(va, pt_level)];
- if (*pte != 0)
+ if (*pte != 0) {
ptp = (void *)(unsigned long)(*pte & PMAP_PA_MASK);
- else {
+ } else {
ptp = biosmem_bootalloc(1);
*pte = ((unsigned long)ptp | PMAP_PTE_RW | PMAP_PTE_P)
& pt_level->mask;
@@ -351,14 +352,16 @@ pmap_boot_get_pgsize(void)
eax = 0x80000000;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax <= 0x80000000)
+ if (eax <= 0x80000000) {
goto out;
+ }
eax = 0x80000001;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (edx & CPU_FEATURE4_1GP)
+ if (edx & CPU_FEATURE4_1GP) {
return (1 << PMAP_L2_SKIP);
+ }
out:
return (1 << PMAP_L1_SKIP);
@@ -366,20 +369,23 @@ out:
eax = 0;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax == 0)
+ if (eax == 0) {
goto out;
+ }
eax = 1;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
#ifdef X86_PAE
- if (!(edx & CPU_FEATURE2_PAE))
+ if (!(edx & CPU_FEATURE2_PAE)) {
boot_panic(pmap_panic_no_pae);
+ }
return (1 << PMAP_L1_SKIP);
#else /* X86_PAE */
- if (edx & CPU_FEATURE2_PSE)
+ if (edx & CPU_FEATURE2_PSE) {
return (1 << PMAP_L1_SKIP);
+ }
#endif /* X86_PAE */
out:
@@ -393,8 +399,9 @@ out:
static void __boot
pmap_boot_enable_pgext(unsigned long pgsize)
{
- if (pgsize == PAGE_SIZE)
+ if (pgsize == PAGE_SIZE) {
return;
+ }
/*
* On 64-bits systems, PAE is already enabled.
@@ -441,8 +448,9 @@ pmap_setup_paging(void)
directmap_end = biosmem_directmap_end();
- if (directmap_end > (VM_MAX_DIRECTMAP_ADDRESS - VM_MIN_DIRECTMAP_ADDRESS))
+ if (directmap_end > (VM_MAX_DIRECTMAP_ADDRESS - VM_MIN_DIRECTMAP_ADDRESS)) {
boot_panic(pmap_panic_directmap_msg);
+ }
va = VM_MIN_DIRECTMAP_ADDRESS;
pa = 0;
@@ -478,8 +486,9 @@ pmap_setup_paging(void)
cpu_table->pdpt_pa = BOOT_VTOP((unsigned long)pmap_cpu_kpdpts[0]);
root_ptp = (void *)cpu_table->pdpt_pa;
- for (i = 0; i < PMAP_NR_RPTPS; i++)
+ for (i = 0; i < PMAP_NR_RPTPS; i++) {
root_ptp[i] = (cpu_table->root_ptp_pa + (i * PAGE_SIZE)) | PMAP_PTE_P;
+ }
#endif /* X86_PAE */
return root_ptp;
@@ -597,8 +606,9 @@ pmap_walk_vas(unsigned long start, unsigned long end, pmap_walk_fn_t walk_fn)
do {
#ifdef __LP64__
/* Handle long mode canonical form */
- if (va == VM_MAX_ADDRESS)
+ if (va == VM_MAX_ADDRESS) {
va = VM_MIN_KERNEL_ADDRESS;
+ }
#endif /* __LP64__ */
level = PMAP_NR_LEVELS - 1;
@@ -610,13 +620,15 @@ pmap_walk_vas(unsigned long start, unsigned long end, pmap_walk_fn_t walk_fn)
index = pmap_pte_index(va, pt_level);
pte = &ptp[index];
- if (!pmap_pte_valid(*pte))
+ if (!pmap_pte_valid(*pte)) {
break;
+ }
walk_fn(ptp_pa, index, level);
- if ((level == 0) || pmap_pte_large(*pte))
+ if ((level == 0) || pmap_pte_large(*pte)) {
break;
+ }
level--;
ptp_pa = *pte & PMAP_PA_MASK;
@@ -635,8 +647,9 @@ pmap_setup_global_page(phys_addr_t ptp_pa, unsigned int index,
pte = &pmap_ptp_from_pa(ptp_pa)[index];
- if ((level == 0) || pmap_pte_large(*pte))
+ if ((level == 0) || pmap_pte_large(*pte)) {
*pte |= PMAP_PTE_G;
+ }
}
static void __init
@@ -666,8 +679,9 @@ pmap_update_oplist_create(struct pmap_update_oplist **oplistp)
oplist = kmem_cache_alloc(&pmap_update_oplist_cache);
- if (oplist == NULL)
+ if (oplist == NULL) {
return ERROR_NOMEM;
+ }
*oplistp = oplist;
return 0;
@@ -697,8 +711,9 @@ pmap_update_oplist_prepare(struct pmap_update_oplist *oplist,
struct pmap *pmap)
{
if (oplist->pmap != pmap) {
- if (oplist->pmap != NULL)
+ if (oplist->pmap != NULL) {
pmap_update(oplist->pmap);
+ }
oplist->pmap = pmap;
} else if (oplist->nr_ops == ARRAY_SIZE(oplist->ops)) {
@@ -710,8 +725,9 @@ pmap_update_oplist_prepare(struct pmap_update_oplist *oplist,
static struct pmap_update_op *
pmap_update_oplist_prev_op(struct pmap_update_oplist *oplist)
{
- if (oplist->nr_ops == 0)
+ if (oplist->nr_ops == 0) {
return NULL;
+ }
return &oplist->ops[oplist->nr_ops - 1];
}
@@ -746,8 +762,9 @@ pmap_update_oplist_count_mappings(const struct pmap_update_oplist *oplist,
for (i = 0; i < oplist->nr_ops; i++) {
op = &oplist->ops[i];
- if (!cpumap_test(&op->cpumap, cpu))
+ if (!cpumap_test(&op->cpumap, cpu)) {
continue;
+ }
switch (op->operation) {
case PMAP_UPDATE_OP_ENTER:
@@ -857,8 +874,9 @@ pmap_bootstrap(void)
cpumap_zero(&pmap_booter_cpumap);
cpumap_set(&pmap_booter_cpumap, 0);
- if (cpu_has_global_pages())
+ if (cpu_has_global_pages()) {
pmap_setup_global_pages();
+ }
}
void __init
@@ -866,10 +884,11 @@ pmap_ap_bootstrap(void)
{
cpu_local_assign(pmap_current_ptr, kernel_pmap);
- if (cpu_has_global_pages())
+ if (cpu_has_global_pages()) {
cpu_enable_global_pages();
- else
+ } else {
cpu_tlb_flush();
+ }
}
static void __init
@@ -880,8 +899,9 @@ pmap_setup_set_ptp_type(phys_addr_t ptp_pa, unsigned int index,
(void)index;
- if (level == 0)
+ if (level == 0) {
return;
+ }
page = vm_page_lookup(ptp_pa);
assert(page != NULL);
@@ -943,13 +963,14 @@ pmap_copy_cpu_table_recursive(const pmap_pte_t *sptp, unsigned int level,
i++, va = P2END(va, 1UL << pt_level->skip)) {
#ifdef __LP64__
/* Handle long mode canonical form */
- if (va == VM_MAX_ADDRESS)
+ if (va == VM_MAX_ADDRESS) {
va = VM_MIN_KERNEL_ADDRESS;
+ }
#endif /* __LP64__ */
- if (!pmap_pte_valid(sptp[i]))
+ if (!pmap_pte_valid(sptp[i])) {
continue;
- else if (pmap_pte_large(sptp[i])) {
+ } else if (pmap_pte_large(sptp[i])) {
dptp[i] = sptp[i];
continue;
}
@@ -959,9 +980,10 @@ pmap_copy_cpu_table_recursive(const pmap_pte_t *sptp, unsigned int level,
pa = vm_page_to_pa(page);
dptp[i] = (sptp[i] & ~PMAP_PA_MASK) | (pa & PMAP_PA_MASK);
- if (((level - 1) == 0) || pmap_pte_large(sptp[i]))
+ if (((level - 1) == 0) || pmap_pte_large(sptp[i])) {
pmap_copy_cpu_table_page(pmap_pte_next(sptp[i]), level - 1, page);
- else
+ } else {
pmap_copy_cpu_table_recursive(pmap_pte_next(sptp[i]),
level - 1, page, va);
+ }
}
@@ -989,8 +1010,9 @@ pmap_copy_cpu_table(unsigned int cpu)
cpu_table->pdpt = pmap_cpu_kpdpts[cpu];
cpu_table->pdpt_pa = BOOT_VTOP((unsigned long)pmap_cpu_kpdpts[cpu]);
- for (i = 0; i < PMAP_NR_RPTPS; i++)
+ for (i = 0; i < PMAP_NR_RPTPS; i++) {
cpu_table->pdpt[i] = (cpu_table->root_ptp_pa + (i * PAGE_SIZE)) | PMAP_PTE_P;
+ }
#endif /* X86_PAE */
}
@@ -1007,8 +1029,9 @@ pmap_mp_setup(void)
error = cpumap_create(&cpumap);
- if (error)
+ if (error) {
panic("pmap: unable to create syncer cpumap");
+ }
for (cpu = 1; cpu < cpu_count(); cpu++) {
pmap_update_request_array_init(percpu_ptr(pmap_update_request_array,
@@ -1027,8 +1050,9 @@ pmap_mp_setup(void)
thread_attr_set_priority(&attr, THREAD_SCHED_RT_PRIO_MIN);
error = thread_create(&syncer->thread, &attr, pmap_sync, syncer);
- if (error)
+ if (error) {
panic("pmap: unable to create syncer thread");
+ }
oplist = thread_tsd_get(syncer->thread, pmap_oplist_tsd_key);
thread_tsd_set(syncer->thread, pmap_oplist_tsd_key, NULL);
@@ -1037,8 +1061,9 @@ pmap_mp_setup(void)
cpumap_destroy(cpumap);
- for (cpu = 1; cpu < cpu_count(); cpu++)
+ for (cpu = 1; cpu < cpu_count(); cpu++) {
pmap_copy_cpu_table(cpu);
+ }
pmap_do_remote_updates = 1;
}
@@ -1051,8 +1076,9 @@ pmap_thread_init(struct thread *thread)
error = pmap_update_oplist_create(&oplist);
- if (error)
+ if (error) {
return error;
+ }
thread_tsd_set(thread, pmap_oplist_tsd_key, oplist);
return 0;
@@ -1072,11 +1098,13 @@ pmap_kextract(unsigned long va, phys_addr_t *pap)
pt_level = &pmap_pt_levels[level];
pte = &ptp[pmap_pte_index(va, pt_level)];
- if (!pmap_pte_valid(*pte))
+ if (!pmap_pte_valid(*pte)) {
return ERROR_FAULT;
+ }
- if ((level == 0) || pmap_pte_large(*pte))
+ if ((level == 0) || pmap_pte_large(*pte)) {
break;
+ }
level--;
ptp = pmap_pte_next(*pte);
@@ -1094,11 +1122,13 @@ pmap_create(struct pmap **pmapp)
pmap = kmem_cache_alloc(&pmap_cache);
- if (pmap == NULL)
+ if (pmap == NULL) {
return ERROR_NOMEM;
+ }
- for (i = 0; i < ARRAY_SIZE(pmap->cpu_tables); i++)
+ for (i = 0; i < ARRAY_SIZE(pmap->cpu_tables); i++) {
pmap->cpu_tables[i] = NULL;
+ }
*pmapp = pmap;
return 0;
@@ -1119,8 +1149,9 @@ pmap_enter_local(struct pmap *pmap, unsigned long va, phys_addr_t pa,
pte_bits = PMAP_PTE_RW;
- if (pmap != kernel_pmap)
+ if (pmap != kernel_pmap) {
pte_bits |= PMAP_PTE_US;
+ }
level = PMAP_NR_LEVELS - 1;
ptp = pmap_ptp_from_pa(pmap->cpu_tables[cpu_id()]->root_ptp_pa);
@@ -1129,12 +1160,13 @@ pmap_enter_local(struct pmap *pmap, unsigned long va, phys_addr_t pa,
pt_level = &pmap_pt_levels[level];
pte = &ptp[pmap_pte_index(va, pt_level)];
- if (level == 0)
+ if (level == 0) {
break;
+ }
- if (pmap_pte_valid(*pte))
+ if (pmap_pte_valid(*pte)) {
ptp = pmap_pte_next(*pte);
- else {
+ } else {
page = vm_page_alloc(0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_PMAP);
assert(page != NULL);
ptp_pa = vm_page_to_pa(page);
@@ -1166,9 +1198,9 @@ pmap_enter(struct pmap *pmap, unsigned long va, phys_addr_t pa,
pmap_update_oplist_prepare(oplist, pmap);
op = pmap_update_oplist_prepare_op(oplist);
- if (flags & PMAP_PEF_GLOBAL)
+ if (flags & PMAP_PEF_GLOBAL) {
cpumap_copy(&op->cpumap, cpumap_all());
- else {
+ } else {
cpumap_zero(&op->cpumap);
cpumap_set(&op->cpumap, cpu_id());
}
@@ -1195,8 +1227,9 @@ pmap_remove_local_single(struct pmap *pmap, unsigned long va)
pt_level = &pmap_pt_levels[level];
pte = &ptp[pmap_pte_index(va, pt_level)];
- if (level == 0)
+ if (level == 0) {
break;
+ }
level--;
ptp = pmap_pte_next(*pte);
@@ -1295,8 +1328,9 @@ pmap_protect(struct pmap *pmap, unsigned long va, int prot,
static void
pmap_flush_tlb(struct pmap *pmap, unsigned long start, unsigned long end)
{
- if ((pmap != pmap_current()) && (pmap != kernel_pmap))
+ if ((pmap != pmap_current()) && (pmap != kernel_pmap)) {
return;
+ }
while (start < end) {
cpu_tlb_flush_va(start);
@@ -1307,13 +1341,15 @@ pmap_flush_tlb(struct pmap *pmap, unsigned long start, unsigned long end)
static void
pmap_flush_tlb_all(struct pmap *pmap)
{
- if ((pmap != pmap_current()) && (pmap != kernel_pmap))
+ if ((pmap != pmap_current()) && (pmap != kernel_pmap)) {
return;
+ }
- if (pmap == kernel_pmap)
+ if (pmap == kernel_pmap) {
cpu_tlb_flush_all();
- else
+ } else {
cpu_tlb_flush();
+ }
}
static void
@@ -1322,8 +1358,9 @@ pmap_update_enter(struct pmap *pmap, int flush,
{
pmap_enter_local(pmap, args->va, args->pa, args->prot, args->flags);
- if (flush)
+ if (flush) {
pmap_flush_tlb(pmap, args->va, args->va + PAGE_SIZE);
+ }
}
static void
@@ -1332,8 +1369,9 @@ pmap_update_remove(struct pmap *pmap, int flush,
{
pmap_remove_local(pmap, args->start, args->end);
- if (flush)
+ if (flush) {
pmap_flush_tlb(pmap, args->start, args->end);
+ }
}
static void
@@ -1342,8 +1380,9 @@ pmap_update_protect(struct pmap *pmap, int flush,
{
pmap_protect_local(pmap, args->start, args->end, args->prot);
- if (flush)
+ if (flush) {
pmap_flush_tlb(pmap, args->start, args->end);
+ }
}
static void
@@ -1362,8 +1401,9 @@ pmap_update_local(const struct pmap_update_oplist *oplist,
for (i = 0; i < oplist->nr_ops; i++) {
op = &oplist->ops[i];
- if (!cpumap_test(&op->cpumap, cpu_id()))
+ if (!cpumap_test(&op->cpumap, cpu_id())) {
continue;
+ }
switch (op->operation) {
case PMAP_UPDATE_OP_ENTER:
@@ -1386,8 +1426,9 @@ pmap_update_local(const struct pmap_update_oplist *oplist,
}
}
- if (global_tlb_flush)
+ if (global_tlb_flush) {
pmap_flush_tlb_all(oplist->pmap);
+ }
}
void
@@ -1403,8 +1444,9 @@ pmap_update(struct pmap *pmap)
oplist = pmap_update_oplist_get();
- if (pmap != oplist->pmap)
+ if (pmap != oplist->pmap) {
return;
+ }
assert(oplist->nr_ops != 0);
@@ -1435,8 +1477,9 @@ pmap_update(struct pmap *pmap)
mutex_lock(&request->lock);
- while (!request->done)
+ while (!request->done) {
condition_wait(&request->cond, &request->lock);
+ }
mutex_unlock(&request->lock);
}
@@ -1462,8 +1505,9 @@ pmap_sync(void *arg)
for (;;) {
mutex_lock(&queue->lock);
- while (list_empty(&queue->requests))
+ while (list_empty(&queue->requests)) {
condition_wait(&queue->cond, &queue->lock);
+ }
request = list_first_entry(&queue->requests,
struct pmap_update_request, node);
@@ -1488,8 +1532,9 @@ pmap_load(struct pmap *pmap)
assert(!cpu_intr_enabled());
assert(!thread_preempt_enabled());
- if (pmap_current() == pmap)
+ if (pmap_current() == pmap) {
return;
+ }
/* TODO Lazy TLB invalidation */
diff --git a/arch/x86/machine/strace.c b/arch/x86/machine/strace.c
index 6058c561..42af6b20 100644
--- a/arch/x86/machine/strace.c
+++ b/arch/x86/machine/strace.c
@@ -46,15 +46,18 @@ strace_lookup(unsigned long addr, unsigned long *offset, unsigned long *size)
for (sym = strace_symtab; sym < strace_symtab_end; sym++) {
if ((sym->size != 0)
&& (addr >= sym->value)
- && (addr <= (sym->value + sym->size)))
+ && (addr <= (sym->value + sym->size))) {
break;
+ }
}
- if (sym >= strace_symtab_end)
+ if (sym >= strace_symtab_end) {
return NULL;
+ }
- if (sym->name == 0)
+ if (sym->name == 0) {
return NULL;
+ }
*offset = addr - sym->value;
*size = sym->size;
@@ -69,9 +72,9 @@ strace_show_one(unsigned int index, unsigned long ip)
name = strace_lookup(ip, &offset, &size);
- if (name == NULL)
+ if (name == NULL) {
printk("strace: #%u [" STRACE_ADDR_FORMAT "]\n", index, ip);
- else
+ } else {
 printk("strace: #%u [" STRACE_ADDR_FORMAT "] %s+%#lx/%#lx\n",
 index, ip, name, offset, size);
+ }
}
@@ -91,8 +94,9 @@ strace_show(unsigned long ip, unsigned long bp)
frame = (void **)bp;
for (;;) {
- if (frame == NULL)
+ if (frame == NULL) {
break;
+ }
error = pmap_kextract((unsigned long)&frame[1], &pa);
@@ -160,8 +164,9 @@ strace_lookup_section(const struct multiboot_raw_info *mbi, const void *table,
shdr = table + (i * mbi->shdr_size);
shdr_name = &shstrtab[shdr->name];
- if (strcmp(shdr_name, name) == 0)
+ if (strcmp(shdr_name, name) == 0) {
return shdr;
+ }
}
return NULL;
@@ -176,8 +181,9 @@ strace_setup(const struct multiboot_raw_info *mbi)
const char *shstrtab;
const void *table;
- if (!(mbi->flags & MULTIBOOT_LOADER_SHDR) || (mbi->shdr_num == 0))
+ if (!(mbi->flags & MULTIBOOT_LOADER_SHDR) || (mbi->shdr_num == 0)) {
goto no_syms;
+ }
size = mbi->shdr_num * mbi->shdr_size;
table = vm_kmem_map_pa(mbi->shdr_addr, size, &map_addr, &map_size);
@@ -217,14 +223,16 @@ strace_setup(const struct multiboot_raw_info *mbi)
strace_symtab = strace_copy_section(symtab_hdr);
- if (strace_symtab == NULL)
+ if (strace_symtab == NULL) {
goto error_symtab;
+ }
strace_symtab_end = (void *)strace_symtab + symtab_hdr->size;
strace_strtab = strace_copy_section(strtab_hdr);
- if (strace_strtab == NULL)
+ if (strace_strtab == NULL) {
goto error_strtab;
+ }
vm_kmem_unmap_pa(shstrtab_map_addr, shstrtab_map_size);
vm_kmem_unmap_pa(map_addr, map_size);
diff --git a/arch/x86/machine/string.c b/arch/x86/machine/string.c
index e78c41cd..26ce941b 100644
--- a/arch/x86/machine/string.c
+++ b/arch/x86/machine/string.c
@@ -78,8 +78,9 @@ memcmp(const void *s1, const void *s2, size_t n)
{
unsigned char c1, c2;
- if (n == 0)
+ if (n == 0) {
return 0;
+ }
asm volatile("repe cmpsb"
: "+D" (s1), "+S" (s2), "+c" (n)
diff --git a/arch/x86/machine/tcb.c b/arch/x86/machine/tcb.c
index a154be11..df1b222c 100644
--- a/arch/x86/machine/tcb.c
+++ b/arch/x86/machine/tcb.c
@@ -40,8 +40,9 @@ tcb_init(struct tcb *tcb, void *stack, void (*fn)(void))
error = pmap_thread_init(thread_from_tcb(tcb));
- if (error)
+ if (error) {
return error;
+ }
tcb->bp = 0;
tcb->sp = (unsigned long)stack + STACK_SIZE - sizeof(unsigned long);
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 15d850c2..49198123 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -171,8 +171,9 @@ trap_setup(void)
{
size_t i;
- for (i = 0; i < CPU_IDT_SIZE; i++)
+ for (i = 0; i < CPU_IDT_SIZE; i++) {
trap_install(i, TRAP_HF_NOPREEMPT, trap_isr_default, trap_default);
+ }
/* Architecture defined traps */
trap_install(TRAP_DE, 0, trap_isr_divide_error, trap_default);
@@ -233,13 +234,15 @@ trap_main(struct trap_frame *frame)
* latter (usually device interrupts), disable preemption to make sure
* there won't be any context switch while handling them.
*/
- if (handler->flags & TRAP_HF_NOPREEMPT)
+ if (handler->flags & TRAP_HF_NOPREEMPT) {
thread_preempt_disable();
+ }
handler->fn(frame);
- if (handler->flags & TRAP_HF_NOPREEMPT)
+ if (handler->flags & TRAP_HF_NOPREEMPT) {
thread_preempt_enable_no_resched();
+ }
thread_schedule();
}