summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2017-10-07 02:07:09 +0200
committerRichard Braun <rbraun@sceen.net>2017-10-07 02:07:09 +0200
commite6552b83ccac77935f55093b453667e8f36f1264 (patch)
treee34218a1fa9344ce85a450c5855bc2056f73914d
parentc3b61eb930d502b952a723cfc152f904b3e88ac9 (diff)
First bootmem prototype
-rw-r--r--arch/arm/machine/boot.c27
-rw-r--r--arch/arm/machine/boot.h18
-rw-r--r--arch/arm/machine/boot_asm.S3
-rw-r--r--arch/arm/machine/pmem.h20
-rw-r--r--arch/arm/x15.lds.S5
-rw-r--r--arch/x86/machine/boot.c78
-rw-r--r--kern/Makefile1
-rw-r--r--kern/bootmem.c584
-rw-r--r--kern/bootmem.h95
9 files changed, 729 insertions, 102 deletions
diff --git a/arch/arm/machine/boot.c b/arch/arm/machine/boot.c
index eb054f66..c8a79d3c 100644
--- a/arch/arm/machine/boot.c
+++ b/arch/arm/machine/boot.c
@@ -20,33 +20,44 @@
#include <stdint.h>
#include <kern/init.h>
+#include <kern/bootmem.h>
#include <machine/boot.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
+#include <machine/pmem.h>
#include <vm/vm_kmem.h>
+#define BOOT_UART_DATA_REG 0x9000000
+
alignas(CPU_DATA_ALIGN) char boot_stack[BOOT_STACK_SIZE] __bootdata;
-static char boot_hello_msg[] __bootdata = "Hello, world!\r\n";
+void boot_setup_paging(void);
-static void __boot
-boot_hello_world(void)
+void __boot
+boot_panic(const char *s)
{
- volatile unsigned long *uart_data_reg = (volatile unsigned long *)0x9000000;
- const char *s = boot_hello_msg;
+ volatile unsigned long *uart_data_reg;
+
+ uart_data_reg = (volatile unsigned long *)BOOT_UART_DATA_REG;
while (*s != '\0') {
*uart_data_reg = *s;
s++;
}
-}
-void boot_setup_paging(void);
+ for (;;);
+}
void __boot
boot_setup_paging(void)
{
- boot_hello_world();
+ bootmem_register_zone(PMEM_ZONE_DMA, true, PMEM_RAM_START, PMEM_DMA_LIMIT);
+ bootmem_setup(false);
+
+#if 1
+ void *page_addr1 = bootmem_alloc(3);
+ void *page_addr2 = bootmem_alloc(3);
+#endif
for (;;);
}
diff --git a/arch/arm/machine/boot.h b/arch/arm/machine/boot.h
index 0181f7a0..46b79255 100644
--- a/arch/arm/machine/boot.h
+++ b/arch/arm/machine/boot.h
@@ -21,6 +21,7 @@
#include <kern/macros.h>
#include <machine/page.h>
#include <machine/pmap.h>
+#include <machine/pmem.h>
/*
* Size of the stack used when booting a processor.
@@ -31,22 +32,31 @@
#define BOOT_TEXT_SECTION .boot.text
#define BOOT_DATA_SECTION .boot.data
-#define BOOT_RAM_START 0x40000000 /* XXX Specific to the Qemu virtual machine */
-#define BOOT_KERNEL_OFFSET (PMAP_START_KERNEL_ADDRESS - BOOT_RAM_START)
+#define BOOT_KERNEL_OFFSET (PMAP_START_KERNEL_ADDRESS - PMEM_RAM_START)
-#define BOOT_RTOL(addr) ((addr) - BOOT_RAM_START)
+#define BOOT_RTOL(addr) ((addr) - PMEM_RAM_START)
#define BOOT_VTOL(addr) ((addr) - PMAP_START_KERNEL_ADDRESS)
-#define BOOT_VTOP(addr) ((addr) - BOOT_RAM_START)
+#define BOOT_VTOP(addr) ((addr) - BOOT_KERNEL_OFFSET)
#ifndef __ASSEMBLER__
+#include <stdnoreturn.h>
+
#include <kern/init.h>
#define __boot __section(QUOTE(BOOT_TEXT_SECTION))
#define __bootdata __section(QUOTE(BOOT_DATA_SECTION)) __attribute__((used))
/*
+ * Boundaries of the .boot section.
+ */
+extern char _boot;
+extern char _boot_end;
+
+noreturn void boot_panic(const char *s);
+
+/*
* Log kernel version and other architecture-specific information.
*/
void boot_log_info(void);
diff --git a/arch/arm/machine/boot_asm.S b/arch/arm/machine/boot_asm.S
index ae250b1b..c78c710e 100644
--- a/arch/arm/machine/boot_asm.S
+++ b/arch/arm/machine/boot_asm.S
@@ -18,6 +18,7 @@
#include <machine/asm.h>
#include <machine/boot.h>
#include <machine/pmap.h>
+#include <machine/pmem.h>
.section BOOT_LOAD_SECTION, "awx"
@@ -38,7 +39,7 @@ ASM_FUNC(_start):
* RAM size isn't known, we can't use it for a stack. As a result,
* perform the relocation in assembly without using a stack.
*/
- ldr %r5, =(BOOT_RAM_START) /* Load RAM address in %r5 */
+ ldr %r5, =(PMEM_RAM_START) /* Load RAM address in %r5 */
mov %r6, #0 /* Load kernel address in %r6 */
ldr %r0, boot_kernel_end /* Load kernel end virtual address
in %r0 */
diff --git a/arch/arm/machine/pmem.h b/arch/arm/machine/pmem.h
index c3314d75..9a072401 100644
--- a/arch/arm/machine/pmem.h
+++ b/arch/arm/machine/pmem.h
@@ -19,6 +19,8 @@
*
* This file is a top header in the inclusion hierarchy, and shouldn't include
* other headers that may cause circular dependencies.
+ *
+ * XXX Specific to the Qemu virtual machine.
*/
#ifndef _ARM_PMEM_H
@@ -30,21 +32,21 @@
* Zone boundaries.
*/
-#define PMEM_DMA_LIMIT DECL_CONST(0x1000000, UL)
-#define PMEM_DIRECTMAP_LIMIT DECL_CONST(0x38000000, ULL)
-#define PMEM_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#define PMEM_RAM_START DECL_CONST(0x40000000, UL)
+#define PMEM_DMA_LIMIT DECL_CONST(0x44000000, UL)
+#define PMEM_DMA32_LIMIT PMEM_DMA_LIMIT
+#define PMEM_DIRECTMAP_LIMIT PMEM_DMA_LIMIT
+#define PMEM_HIGHMEM_LIMIT PMEM_DMA_LIMIT
-#define PMEM_MAX_ZONES 3
+#define PMEM_MAX_ZONES 1
/*
* Zone vm_page indexes.
*/
#define PMEM_ZONE_DMA 0
-#define PMEM_ZONE_DMA32 1
-
-#define PMEM_ZONE_DMA32 1
-#define PMEM_ZONE_DIRECTMAP 1 /* Alias for the DMA32 zone */
-#define PMEM_ZONE_HIGHMEM 2
+#define PMEM_ZONE_DMA32 PMEM_ZONE_DMA
+#define PMEM_ZONE_DIRECTMAP PMEM_ZONE_DMA
+#define PMEM_ZONE_HIGHMEM PMEM_ZONE_DMA
#endif /* _ARM_PMEM_H */
diff --git a/arch/arm/x15.lds.S b/arch/arm/x15.lds.S
index d867de29..fa2698d1 100644
--- a/arch/arm/x15.lds.S
+++ b/arch/arm/x15.lds.S
@@ -7,6 +7,7 @@ ENTRY(_start)
#include <machine/boot.h>
#include <machine/cpu.h>
#include <machine/page.h>
+#include <machine/pmem.h>
PHDRS
{
@@ -28,7 +29,7 @@ SECTIONS
*(BOOT_LOAD_SECTION)
} : load
- . += BOOT_RAM_START;
+ . += PMEM_RAM_START;
_boot = .;
.boot ALIGN(PAGE_SIZE) : AT(BOOT_RTOL(ADDR(.boot))) {
@@ -39,7 +40,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_boot_end = .;
- . += (PMAP_START_KERNEL_ADDRESS - BOOT_RAM_START);
+ . += (PMAP_START_KERNEL_ADDRESS - PMEM_RAM_START);
_init = .;
.init ALIGN(PAGE_SIZE) : AT(BOOT_VTOL(ADDR(.init))) {
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index afc41d70..9b05d60d 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -120,84 +120,6 @@ static char boot_panic_meminfo_msg[] __bootdata
static char boot_panic_cmdline_msg[] __bootdata
= "boot: command line too long";
-void * __boot
-boot_memcpy(void *dest, const void *src, size_t n)
-{
- const char *src_ptr;
- char *dest_ptr;
- size_t i;
-
- dest_ptr = dest;
- src_ptr = src;
-
- for (i = 0; i < n; i++) {
- *dest_ptr = *src_ptr;
- dest_ptr++;
- src_ptr++;
- }
-
- return dest;
-}
-
-void * __boot
-boot_memmove(void *dest, const void *src, size_t n)
-{
- const char *src_ptr;
- char *dest_ptr;
- size_t i;
-
- if (dest <= src) {
- dest_ptr = dest;
- src_ptr = src;
-
- for (i = 0; i < n; i++) {
- *dest_ptr = *src_ptr;
- dest_ptr++;
- src_ptr++;
- }
- } else {
- dest_ptr = dest + n - 1;
- src_ptr = src + n - 1;
-
- for (i = 0; i < n; i++) {
- *dest_ptr = *src_ptr;
- dest_ptr--;
- src_ptr--;
- }
- }
-
- return dest;
-}
-
-void * __boot
-boot_memset(void *s, int c, size_t n)
-{
- char *buffer;
- size_t i;
-
- buffer = s;
-
- for (i = 0; i < n; i++) {
- buffer[i] = c;
- }
-
- return s;
-}
-
-size_t __boot
-boot_strlen(const char *s)
-{
- const char *start;
-
- start = s;
-
- while (*s != '\0') {
- s++;
- }
-
- return (s - start);
-}
-
void __boot
boot_panic(const char *msg)
{
diff --git a/kern/Makefile b/kern/Makefile
index 0aa96fc3..dfd1cbd8 100644
--- a/kern/Makefile
+++ b/kern/Makefile
@@ -1,6 +1,7 @@
x15_SOURCES-y += \
kern/arg.c \
kern/bitmap.c \
+ kern/bootmem.c \
kern/cbuf.c \
kern/clock.c \
kern/condition.c \
diff --git a/kern/bootmem.c b/kern/bootmem.c
new file mode 100644
index 00000000..98627754
--- /dev/null
+++ b/kern/bootmem.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2010-2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <kern/bootmem.h>
+#include <kern/error.h>
+#include <kern/macros.h>
+#include <machine/boot.h>
+#include <machine/pmem.h>
+#include <machine/types.h>
+#include <vm/vm_kmem.h>
+#include <vm/vm_page.h>
+
+#define BOOTMEM_MAX_RESERVED_RANGES 64
+
+/*
+ * Contiguous block of physical memory.
+ */
+struct bootmem_zone {
+ phys_addr_t start;
+ phys_addr_t end;
+ bool registered;
+ bool direct_mapped;
+};
+
+/*
+ * Physical zone boundaries.
+ */
+static struct bootmem_zone bootmem_zones[PMEM_MAX_ZONES] __bootdata;
+
+/*
+ * Physical memory range descriptor.
+ *
+ * The start and end addresses must not be page-aligned, since there
+ * could be more than one range inside a single page.
+ */
+struct bootmem_range {
+ phys_addr_t start;
+ phys_addr_t end;
+ bool temporary;
+};
+
+/*
+ * Sorted array of range descriptors.
+ */
+static struct bootmem_range bootmem_reserved_ranges[BOOTMEM_MAX_RESERVED_RANGES]
+ __bootdata;
+static unsigned int bootmem_nr_reserved_ranges __bootdata;
+
+/*
+ * Top-down allocations are normally preferred to avoid unnecessarily
+ * filling the DMA zone.
+ */
+struct bootmem_heap {
+ phys_addr_t start;
+ phys_addr_t end;
+ phys_addr_t bottom;
+ phys_addr_t top;
+ bool topdown;
+};
+
+static struct bootmem_heap bootmem_heap __bootdata;
+
+static char bootmem_panic_msg_zone_overlapping[] __bootdata
+ = "bootmem: zone overlapping";
+static char bootmem_panic_msg_invalid_zone_index_msg[] __bootdata
+ = "bootmem: invalid zone index";
+static char bootmem_panic_msg_zone_already_registered[] __bootdata
+ = "bootmem: zone already registered";
+static char bootmem_panic_msg_invalid_reserved_range[] __bootdata
+ = "bootmem: invalid reserved range";
+static char bootmem_panic_msg_too_many_reserved_ranges[] __bootdata
+ = "bootmem: too many reserved ranges";
+static char bootmem_panic_msg_setup[] __bootdata
+ = "bootmem: unable to set up the early memory allocator";
+static char bootmem_panic_msg_nomem[] __bootdata
+ = "bootmem: unable to allocate memory";
+static char bootmem_panic_msg_invalid_argument[] __bootdata
+ = "bootmem: invalid argument";
+
+void * __boot
+bootmem_memcpy(void *dest, const void *src, size_t n)
+{
+ const char *src_ptr;
+ char *dest_ptr;
+
+ dest_ptr = dest;
+ src_ptr = src;
+
+ for (size_t i = 0; i < n; i++) {
+ *dest_ptr = *src_ptr;
+ dest_ptr++;
+ src_ptr++;
+ }
+
+ return dest;
+}
+
+void * __boot
+bootmem_memmove(void *dest, const void *src, size_t n)
+{
+ const char *src_ptr;
+ char *dest_ptr;
+
+ if (dest <= src) {
+ dest_ptr = dest;
+ src_ptr = src;
+
+ for (size_t i = 0; i < n; i++) {
+ *dest_ptr = *src_ptr;
+ dest_ptr++;
+ src_ptr++;
+ }
+ } else {
+ dest_ptr = dest + n - 1;
+ src_ptr = src + n - 1;
+
+ for (size_t i = 0; i < n; i++) {
+ *dest_ptr = *src_ptr;
+ dest_ptr--;
+ src_ptr--;
+ }
+ }
+
+ return dest;
+}
+
+void * __boot
+bootmem_memset(void *s, int c, size_t n)
+{
+ char *buffer;
+
+ buffer = s;
+
+ for (size_t i = 0; i < n; i++) {
+ buffer[i] = c;
+ }
+
+ return s;
+}
+
+size_t __boot
+bootmem_strlen(const char *s)
+{
+ const char *start;
+
+ start = s;
+
+ while (*s != '\0') {
+ s++;
+ }
+
+ return (s - start);
+}
+
+static bool __boot
+bootmem_overlaps(phys_addr_t start1, phys_addr_t end1,
+ phys_addr_t start2, phys_addr_t end2)
+{
+ return ((end2 > start1) && (start2 < end1));
+}
+
+static bool __boot
+bootmem_included(phys_addr_t start1, phys_addr_t end1,
+ phys_addr_t start2, phys_addr_t end2)
+{
+ return ((start2 >= start1) && (end2 <= end1));
+}
+
+static void __boot
+bootmem_zone_init(struct bootmem_zone *zone, phys_addr_t start,
+ phys_addr_t end, bool direct_mapped)
+{
+ zone->start = start;
+ zone->end = end;
+ zone->registered = true;
+ zone->direct_mapped = direct_mapped;
+}
+
+static phys_addr_t __boot
+bootmem_zone_end(const struct bootmem_zone *zone)
+{
+ return zone->end;
+}
+
+static phys_addr_t __boot
+bootmem_zone_size(const struct bootmem_zone *zone)
+{
+ return zone->end - zone->start;
+}
+
+static bool __boot
+bootmem_zone_registered(const struct bootmem_zone *zone)
+{
+ return zone->registered;
+}
+
+static bool __boot
+bootmem_zone_overlaps(const struct bootmem_zone *zone,
+ phys_addr_t start, phys_addr_t end)
+{
+ return bootmem_overlaps(zone->start, zone->end, start, end);
+}
+
+static struct bootmem_zone * __boot
+bootmem_get_zone(unsigned int index)
+{
+ assert(index < ARRAY_SIZE(bootmem_zones));
+ return &bootmem_zones[index];
+}
+
+void __boot
+bootmem_register_zone(unsigned int zone_index, bool direct_mapped,
+ phys_addr_t start, phys_addr_t end)
+{
+ struct bootmem_zone *zone, *tmp;
+
+ for (size_t i = 0; i < ARRAY_SIZE(bootmem_zones); i++) {
+ tmp = bootmem_get_zone(i);
+
+ if (!bootmem_zone_registered(tmp)) {
+ continue;
+ }
+
+ if (bootmem_zone_overlaps(tmp, start, end)) {
+ boot_panic(bootmem_panic_msg_zone_overlapping);
+ }
+ }
+
+ zone = bootmem_get_zone(zone_index);
+
+ if (zone == NULL) {
+ boot_panic(bootmem_panic_msg_invalid_zone_index_msg);
+ }
+
+ if (bootmem_zone_registered(zone)) {
+ boot_panic(bootmem_panic_msg_zone_already_registered);
+ }
+
+ bootmem_zone_init(zone, start, end, direct_mapped);
+}
+
+static void __boot
+bootmem_range_init(struct bootmem_range *range, phys_addr_t start,
+ phys_addr_t end, bool temporary)
+{
+ range->start = start;
+ range->end = end;
+ range->temporary = temporary;
+}
+
+static phys_addr_t __boot
+bootmem_range_start(const struct bootmem_range *range)
+{
+ return range->start;
+}
+
+static bool __boot
+bootmem_range_temporary(const struct bootmem_range *range)
+{
+ return range->temporary;
+}
+
+static void __boot
+bootmem_range_clear_temporary(struct bootmem_range *range)
+{
+ range->temporary = false;
+}
+
+static bool __boot
+bootmem_range_overlaps(const struct bootmem_range *range,
+ phys_addr_t start, phys_addr_t end)
+{
+ return bootmem_overlaps(range->start, range->end, start, end);
+}
+
+static bool __boot
+bootmem_range_included(const struct bootmem_range *range,
+ phys_addr_t start, phys_addr_t end)
+{
+ return bootmem_included(range->start, range->end, start, end);
+}
+
+static int __boot
+bootmem_range_clip_region(const struct bootmem_range *range,
+ phys_addr_t *region_start, phys_addr_t *region_end)
+{
+ phys_addr_t range_start, range_end;
+
+ range_start = vm_page_trunc(range->start);
+ range_end = vm_page_round(range->end);
+
+ if (range_end < range->end) {
+ boot_panic(bootmem_panic_msg_invalid_reserved_range);
+ }
+
+ if ((range_end <= *region_start) || (range_start >= *region_end)) {
+ return 0;
+ }
+
+ if (range_start > *region_start) {
+ *region_end = range_start;
+ } else {
+ if (range_end >= *region_end) {
+ return ERROR_NOMEM;
+ }
+
+ *region_start = range_end;
+ }
+
+ return 0;
+}
+
+static struct bootmem_range * __boot
+bootmem_get_reserved_range(unsigned int index)
+{
+ assert(index < ARRAY_SIZE(bootmem_reserved_ranges));
+ return &bootmem_reserved_ranges[index];
+}
+
+static void __boot
+bootmem_shift_ranges_up(struct bootmem_range *range)
+{
+ struct bootmem_range *end;
+ size_t size;
+
+ end = bootmem_reserved_ranges + ARRAY_SIZE(bootmem_reserved_ranges);
+ size = (end - range - 1) * sizeof(*range);
+ bootmem_memmove(range + 1, range, size);
+}
+
+void __boot
+bootmem_reserve_range(phys_addr_t start, phys_addr_t end, bool temporary)
+{
+ struct bootmem_range *range;
+
+ if (start >= end) {
+ boot_panic(bootmem_panic_msg_invalid_reserved_range);
+ }
+
+ if (bootmem_nr_reserved_ranges >= ARRAY_SIZE(bootmem_reserved_ranges)) {
+ boot_panic(bootmem_panic_msg_too_many_reserved_ranges);
+ }
+
+ range = NULL;
+
+ for (unsigned int i = 0; i < bootmem_nr_reserved_ranges; i++) {
+ range = bootmem_get_reserved_range(i);
+
+ if (bootmem_range_overlaps(range, start, end)) {
+ /*
+ * If the range overlaps, check whether it's part of another
+ * range. For example, this applies to debugging symbols directly
+ * taken from the kernel image.
+ */
+ if (bootmem_range_included(range, start, end)) {
+ /*
+ * If it's completely included, make sure that a permanent
+ * range remains permanent.
+ *
+ * XXX This means that if one big range is first registered
+ * as temporary, and a smaller range inside of it is
+ * registered as permanent, the bigger range becomes
+ * permanent. It's not easy nor useful in practice to do
+ * better than that.
+ */
+ if (bootmem_range_temporary(range) != temporary) {
+ bootmem_range_clear_temporary(range);
+ }
+
+ return;
+ }
+
+ boot_panic(bootmem_panic_msg_invalid_reserved_range);
+ }
+
+ if (end <= bootmem_range_start(range)) {
+ break;
+ }
+ }
+
+ if (range == NULL) {
+ range = bootmem_reserved_ranges;
+ }
+
+ bootmem_shift_ranges_up(range);
+ bootmem_range_init(range, start, end, temporary);
+ bootmem_nr_reserved_ranges++;
+}
+
+static void __boot
+bootmem_heap_init(struct bootmem_heap *heap, bool topdown,
+ phys_addr_t start, phys_addr_t end)
+{
+ heap->start = start;
+ heap->end = end;
+ heap->bottom = start;
+ heap->top = end;
+ heap->topdown = topdown;
+
+ bootmem_reserve_range(start, end, false);
+}
+
+static void * __boot
+bootmem_heap_alloc(struct bootmem_heap *heap, unsigned int nr_pages)
+{
+ unsigned long addr, size;
+
+ size = vm_page_ptob(nr_pages);
+
+ if (size == 0) {
+ boot_panic(bootmem_panic_msg_invalid_argument);
+ }
+
+ if (heap->topdown) {
+ addr = heap->top - size;
+
+ if ((addr < heap->start) || (addr > heap->top)) {
+ boot_panic(bootmem_panic_msg_nomem);
+ }
+
+ heap->top = addr;
+ } else {
+ unsigned long end;
+
+ addr = heap->bottom;
+ end = addr + size;
+
+ if ((end > heap->end) || (end < heap->bottom)) {
+ boot_panic(bootmem_panic_msg_nomem);
+ }
+
+ heap->bottom = end;
+ }
+
+ return bootmem_memset((void *)addr, 0, size);
+}
+
+static struct bootmem_heap * __boot
+bootmem_get_heap(void)
+{
+ return &bootmem_heap;
+}
+
+/*
+ * Find available memory.
+ *
+ * The search starts at the given start address, up to the given end address.
+ * If a range is found, it is stored through the region_startp and region_endp
+ * pointers.
+ *
+ * The range boundaries are page-aligned on return.
+ */
+static int __boot
+bootmem_find_avail(phys_addr_t start, phys_addr_t end,
+ phys_addr_t *region_start, phys_addr_t *region_end)
+{
+ phys_addr_t orig_start;
+ int error;
+
+ assert(start <= end);
+
+ orig_start = start;
+ start = vm_page_round(start);
+ end = vm_page_trunc(end);
+
+ if ((start < orig_start) || (start >= end)) {
+ return ERROR_INVAL;
+ }
+
+ *region_start = start;
+ *region_end = end;
+
+ for (unsigned int i = 0; i < bootmem_nr_reserved_ranges; i++) {
+ error = bootmem_range_clip_region(bootmem_get_reserved_range(i),
+ region_start, region_end);
+
+ if (error) {
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+void __boot
+bootmem_setup(bool topdown)
+{
+ phys_addr_t heap_start, heap_end, max_heap_start, max_heap_end;
+ phys_addr_t start, end;
+ int error;
+
+ bootmem_reserve_range((uintptr_t)&_boot, BOOT_VTOP((uintptr_t)&_end), false);
+
+ /*
+ * Find some memory for the heap. Look for the largest unused area in
+ * upper memory, carefully avoiding all boot data.
+ */
+ end = bootmem_directmap_end();
+
+ max_heap_start = 0;
+ max_heap_end = 0;
+ start = PMEM_RAM_START;
+
+ for (;;) {
+ error = bootmem_find_avail(start, end, &heap_start, &heap_end);
+
+ if (error) {
+ break;
+ }
+
+ if ((heap_end - heap_start) > (max_heap_end - max_heap_start)) {
+ max_heap_start = heap_start;
+ max_heap_end = heap_end;
+ }
+
+ start = heap_end;
+ }
+
+ if (max_heap_start >= max_heap_end) {
+ boot_panic(bootmem_panic_msg_setup);
+ }
+
+ bootmem_heap_init(bootmem_get_heap(), topdown,
+ max_heap_start, max_heap_end);
+}
+
+void * __boot
+bootmem_alloc(unsigned int nr_pages)
+{
+ return bootmem_heap_alloc(bootmem_get_heap(), nr_pages);
+}
+
+phys_addr_t __boot
+bootmem_directmap_end(void)
+{
+ if (bootmem_zone_size(bootmem_get_zone(PMEM_ZONE_DIRECTMAP)) != 0) {
+ return bootmem_zone_end(bootmem_get_zone(PMEM_ZONE_DIRECTMAP));
+ } else if (bootmem_zone_size(bootmem_get_zone(PMEM_ZONE_DMA32)) != 0) {
+ return bootmem_zone_end(bootmem_get_zone(PMEM_ZONE_DMA32));
+ } else {
+ return bootmem_zone_end(bootmem_get_zone(PMEM_ZONE_DMA));
+ }
+}
+
+#if 0
+static void __init
+bootmem_map_show(void)
+{
+ const struct bootmem_map_entry *entry, *end;
+
+ log_debug("bootmem: physical memory map:");
+
+ for (entry = bootmem_map, end = entry + bootmem_map_size;
+ entry < end;
+ entry++)
+ log_debug("bootmem: %018llx:%018llx",
+ (unsigned long long)entry->base_addr,
+ (unsigned long long)(entry->base_addr + entry->length));
+
+ log_debug("bootmem: heap: %llx:%llx",
+ (unsigned long long)bootmem_heap_start,
+ (unsigned long long)bootmem_heap_end);
+}
+#endif
diff --git a/kern/bootmem.h b/kern/bootmem.h
new file mode 100644
index 00000000..93cb6c08
--- /dev/null
+++ b/kern/bootmem.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2010-2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Early page allocator.
+ */
+
+#ifndef _KERN_BOOTMEM_H
+#define _KERN_BOOTMEM_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include <kern/init.h>
+#include <machine/types.h>
+
+/*
+ * Helper functions available before paging is enabled.
+ *
+ * Any memory passed to these must also be accessible without paging.
+ */
+void * bootmem_memcpy(void *dest, const void *src, size_t n);
+void * bootmem_memmove(void *dest, const void *src, size_t n);
+void * bootmem_memset(void *s, int c, size_t n);
+size_t bootmem_strlen(const char *s);
+
+/*
+ * Register a physical memory zone.
+ *
+ * Zones are expected to be sorted in ascending order of addresses and
+ * not overlap. They are later loaded into the VM system. Set direct_mapped
+ * to true if the zone is part of the direct mapping of physical memory.
+ *
+ * This function is called before paging is enabled.
+ */
+void bootmem_register_zone(unsigned int zone_index, bool direct_mapped,
+ phys_addr_t start, phys_addr_t end);
+
+/*
+ * Report reserved addresses to the bootmem module.
+ *
+ * The kernel is automatically reserved.
+ *
+ * Once all reserved ranges have been registered, the user can initialize the
+ * early page allocator.
+ *
+ * If the range is marked temporary, it will be unregistered once
+ * the boot data have been saved/consumed so that their backing
+ * pages are loaded into the VM system.
+ *
+ * This function is called before paging is enabled.
+ */
+void bootmem_reserve_range(phys_addr_t start, phys_addr_t end, bool temporary);
+
+/*
+ * Initialize the early page allocator.
+ *
+ * This function builds a heap based on the registered zones while carefully
+ * avoiding reserved data.
+ *
+ * This function is called before paging is enabled.
+ */
+void bootmem_setup(bool topdown);
+
+/*
+ * Allocate contiguous physical pages.
+ *
+ * The pages returned are guaranteed to be part of the direct physical
+ * mapping when paging is enabled.
+ *
+ * This function should only be used to allocate initial page table pages.
+ * Those pages are later loaded into the VM system (as reserved pages)
+ * which means they can be freed like other regular pages. Users should
+ * fix up the type of those pages once the VM system is initialized.
+ *
+ * This function is called before paging is enabled.
+ */
+void * bootmem_alloc(unsigned int nr_pages);
+
+phys_addr_t bootmem_directmap_end(void);
+
+#endif /* _KERN_BOOTMEM_H */