-rw-r--r--  arch/x86/machine/biosmem.c | 104
-rw-r--r--  arch/x86/machine/param.h   |  22
-rw-r--r--  vm/vm_page.c               | 331
-rw-r--r--  vm/vm_page.h               |  26
4 files changed, 242 insertions, 241 deletions
diff --git a/arch/x86/machine/biosmem.c b/arch/x86/machine/biosmem.c
index dd55b04b..539ea3ac 100644
--- a/arch/x86/machine/biosmem.c
+++ b/arch/x86/machine/biosmem.c
@@ -97,15 +97,15 @@ static unsigned int biosmem_map_size __bootdata;
 /*
  * Contiguous block of physical memory.
  */
-struct biosmem_segment {
+struct biosmem_zone {
     phys_addr_t start;
     phys_addr_t end;
 };
 
 /*
- * Physical segment boundaries.
+ * Physical zone boundaries.
  */
-static struct biosmem_segment biosmem_segments[VM_PAGE_MAX_SEGS] __bootdata;
+static struct biosmem_zone biosmem_zones[VM_PAGE_MAX_ZONES] __bootdata;
 
 /*
  * Boundaries of the simple bootstrap heap.
@@ -121,7 +121,7 @@ static phys_addr_t biosmem_heap_end __bootdata;
  * Boot allocation policy.
  *
  * Top-down allocations are normally preferred to avoid unnecessarily
- * filling the DMA segment.
+ * filling the DMA zone.
  */
 static bool biosmem_heap_topdown __bootdata;
 
@@ -133,8 +133,8 @@ static char biosmem_panic_too_big_msg[] __bootdata
     = "biosmem: too many memory map entries";
 static char biosmem_panic_setup_msg[] __bootdata
     = "biosmem: unable to set up the early memory allocator";
-static char biosmem_panic_noseg_msg[] __bootdata
-    = "biosmem: unable to find any memory segment";
+static char biosmem_panic_nozone_msg[] __bootdata
+    = "biosmem: unable to find any memory zone";
 static char biosmem_panic_inval_msg[] __bootdata
     = "biosmem: attempt to allocate 0 page";
 static char biosmem_panic_nomem_msg[] __bootdata
@@ -444,17 +444,17 @@ biosmem_map_adjust(void)
  * in *phys_start, and the highest address of unusable memory immediately
  * following physical memory in *phys_end.
  *
- * These addresses are normally used to establish the range of a segment.
+ * These addresses are normally used to establish the range of a zone.
  */
 static int __boot
 biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
 {
     const struct biosmem_map_entry *entry, *map_end;
-    phys_addr_t seg_start, seg_end;
+    phys_addr_t zone_start, zone_end;
     uint64_t start, end;
 
-    seg_start = (phys_addr_t)-1;
-    seg_end = (phys_addr_t)-1;
+    zone_start = (phys_addr_t)-1;
+    zone_end = (phys_addr_t)-1;
     map_end = biosmem_map + biosmem_map_size;
 
     for (entry = biosmem_map; entry < map_end; entry++) {
@@ -471,46 +471,46 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
         end = vm_page_trunc(entry->base_addr + entry->length);
 
         if ((start < end) && (start < *phys_end) && (end > *phys_start)) {
-            if (seg_start == (phys_addr_t)-1) {
-                seg_start = start;
+            if (zone_start == (phys_addr_t)-1) {
+                zone_start = start;
             }
 
-            seg_end = end;
+            zone_end = end;
         }
     }
 
-    if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1)) {
+    if ((zone_start == (phys_addr_t)-1) || (zone_end == (phys_addr_t)-1)) {
         return -1;
     }
 
-    if (seg_start > *phys_start) {
-        *phys_start = seg_start;
+    if (zone_start > *phys_start) {
+        *phys_start = zone_start;
     }
 
-    if (seg_end < *phys_end) {
-        *phys_end = seg_end;
+    if (zone_end < *phys_end) {
+        *phys_end = zone_end;
     }
 
     return 0;
 }
 
 static void __boot
-biosmem_set_segment(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+biosmem_set_zone(unsigned int zone_index, phys_addr_t start, phys_addr_t end)
 {
-    biosmem_segments[seg_index].start = start;
-    biosmem_segments[seg_index].end = end;
+    biosmem_zones[zone_index].start = start;
+    biosmem_zones[zone_index].end = end;
 }
 
 static phys_addr_t __boot
-biosmem_segment_end(unsigned int seg_index)
+biosmem_zone_end(unsigned int zone_index)
 {
-    return biosmem_segments[seg_index].end;
+    return biosmem_zones[zone_index].end;
 }
 
 static phys_addr_t __boot
-biosmem_segment_size(unsigned int seg_index)
+biosmem_zone_size(unsigned int zone_index)
 {
-    return biosmem_segments[seg_index].end - biosmem_segments[seg_index].start;
+    return biosmem_zones[zone_index].end - biosmem_zones[zone_index].start;
 }
 
 static int __boot
@@ -660,10 +660,10 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
     error = biosmem_map_find_avail(&phys_start, &phys_end);
 
     if (error) {
-        boot_panic(biosmem_panic_noseg_msg);
+        boot_panic(biosmem_panic_nozone_msg);
     }
 
-    biosmem_set_segment(VM_PAGE_SEG_DMA, phys_start, phys_end);
+    biosmem_set_zone(VM_PAGE_ZONE_DMA, phys_start, phys_end);
 
     phys_start = VM_PAGE_DMA_LIMIT;
 #ifdef VM_PAGE_DMA32_LIMIT
@@ -674,7 +674,7 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
         goto out;
     }
 
-    biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
+    biosmem_set_zone(VM_PAGE_ZONE_DMA32, phys_start, phys_end);
 
     phys_start = VM_PAGE_DMA32_LIMIT;
 #endif /* VM_PAGE_DMA32_LIMIT */
@@ -685,7 +685,7 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
         goto out;
     }
 
-    biosmem_set_segment(VM_PAGE_SEG_DIRECTMAP, phys_start, phys_end);
+    biosmem_set_zone(VM_PAGE_ZONE_DIRECTMAP, phys_start, phys_end);
 
     phys_start = VM_PAGE_DIRECTMAP_LIMIT;
     phys_end = VM_PAGE_HIGHMEM_LIMIT;
@@ -695,7 +695,7 @@ biosmem_bootstrap(const struct multiboot_raw_info *mbi)
         goto out;
     }
 
-    biosmem_set_segment(VM_PAGE_SEG_HIGHMEM, phys_start, phys_end);
+    biosmem_set_zone(VM_PAGE_ZONE_HIGHMEM, phys_start, phys_end);
 
 out:
     biosmem_setup_allocator(mbi);
@@ -739,12 +739,12 @@
 phys_addr_t __boot
 biosmem_directmap_end(void)
 {
-    if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0) {
-        return biosmem_segment_end(VM_PAGE_SEG_DIRECTMAP);
-    } else if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0) {
-        return biosmem_segment_end(VM_PAGE_SEG_DMA32);
+    if (biosmem_zone_size(VM_PAGE_ZONE_DIRECTMAP) != 0) {
+        return biosmem_zone_end(VM_PAGE_ZONE_DIRECTMAP);
+    } else if (biosmem_zone_size(VM_PAGE_ZONE_DMA32) != 0) {
+        return biosmem_zone_end(VM_PAGE_ZONE_DMA32);
     } else {
-        return biosmem_segment_end(VM_PAGE_SEG_DMA);
+        return biosmem_zone_end(VM_PAGE_ZONE_DMA);
     }
 }
 
@@ -793,32 +793,32 @@ biosmem_map_show(void)
 #endif /* DEBUG */
 
 static void __init
-biosmem_load_segment(struct biosmem_segment *seg, uint64_t max_phys_end)
+biosmem_load_zone(struct biosmem_zone *zone, uint64_t max_phys_end)
 {
     phys_addr_t phys_start, phys_end, avail_start, avail_end;
-    unsigned int seg_index;
+    unsigned int zone_index;
 
-    phys_start = seg->start;
-    phys_end = seg->end;
-    seg_index = seg - biosmem_segments;
+    phys_start = zone->start;
+    phys_end = zone->end;
+    zone_index = zone - biosmem_zones;
 
     if (phys_end > max_phys_end) {
         if (max_phys_end <= phys_start) {
-            printk("biosmem: warning: segment %s physically unreachable, "
-                   "not loaded\n", vm_page_seg_name(seg_index));
+            printk("biosmem: warning: zone %s physically unreachable, "
+                   "not loaded\n", vm_page_zone_name(zone_index));
             return;
         }
 
-        printk("biosmem: warning: segment %s truncated to %#llx\n",
-               vm_page_seg_name(seg_index), max_phys_end);
+        printk("biosmem: warning: zone %s truncated to %#llx\n",
+               vm_page_zone_name(zone_index), max_phys_end);
         phys_end = max_phys_end;
     }
 
-    vm_page_load(seg_index, phys_start, phys_end);
+    vm_page_load(zone_index, phys_start, phys_end);
 
     /*
      * Clip the remaining available heap to fit it into the loaded
-     * segment if possible.
+     * zone if possible.
      */
     if ((biosmem_heap_top > phys_start)
         && (biosmem_heap_bottom < phys_end)) {
@@ -834,7 +834,7 @@
             avail_end = phys_end;
         }
 
-        vm_page_load_heap(seg_index, avail_start, avail_end);
+        vm_page_load_heap(zone_index, avail_start, avail_end);
     }
 }
 
@@ -842,7 +842,7 @@
 void __init
 biosmem_setup(void)
 {
     uint64_t max_phys_end;
-    struct biosmem_segment *seg;
+    struct biosmem_zone *zone;
     struct cpu *cpu;
     unsigned int i;
 
@@ -853,13 +853,13 @@ biosmem_setup(void)
                    ? (uint64_t)-1
                    : (uint64_t)1 << cpu->phys_addr_width;
 
-    for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
-        if (biosmem_segment_size(i) == 0) {
+    for (i = 0; i < ARRAY_SIZE(biosmem_zones); i++) {
+        if (biosmem_zone_size(i) == 0) {
             break;
         }
 
-        seg = &biosmem_segments[i];
-        biosmem_load_segment(seg, max_phys_end);
+        zone = &biosmem_zones[i];
+        biosmem_load_zone(zone, max_phys_end);
     }
 }
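
The biosmem changes are a mechanical rename, but the logic they touch is worth restating: biosmem_bootstrap() carves the available physical range into zones along the VM_PAGE_*_LIMIT constants, one biosmem_set_zone() call per zone. The following stand-alone sketch (plain C with stand-in types and the 64-bit limit values; illustrative only, not kernel code) shows that carving on a hypothetical machine with 8 GiB of contiguous physical memory:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    /* Stand-in limits, mirroring the 64-bit values from param.h. */
    #define DMA_LIMIT        0x1000000ULL        /* 16 MiB */
    #define DMA32_LIMIT      0x100000000ULL      /* 4 GiB */
    #define DIRECTMAP_LIMIT  0x400000000000ULL
    #define HIGHMEM_LIMIT    0x10000000000000ULL

    int
    main(void)
    {
        const char *names[] = { "DMA", "DMA32", "DIRECTMAP", "HIGHMEM" };
        phys_addr_t limits[] = { DMA_LIMIT, DMA32_LIMIT, DIRECTMAP_LIMIT,
                                 HIGHMEM_LIMIT };
        phys_addr_t mem_end = 0x200000000ULL; /* hypothetical 8 GiB machine */
        phys_addr_t start = 0;

        /* Split [0, mem_end) along the zone limits, low to high. */
        for (unsigned int i = 0; i < 4; i++) {
            phys_addr_t end = (mem_end < limits[i]) ? mem_end : limits[i];

            if (start >= end) {
                break;
            }

            printf("zone %-9s [%#llx, %#llx)\n", names[i],
                   (unsigned long long)start, (unsigned long long)end);
            start = limits[i];
        }

        return 0;
    }

On such a machine the loop stops after DIRECTMAP, leaving HIGHMEM empty, which corresponds to the early exits through the out label in biosmem_bootstrap() above.
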
diff --git a/arch/x86/machine/param.h b/arch/x86/machine/param.h
index 29b9e6f8..3d34fe7d 100644
--- a/arch/x86/machine/param.h
+++ b/arch/x86/machine/param.h
@@ -154,34 +154,34 @@
 #define VM_PAGE_DMA_LIMIT       DECL_CONST(0x1000000, UL)
 
 #ifdef __LP64__
-#define VM_PAGE_MAX_SEGS        4
+#define VM_PAGE_MAX_ZONES       4
 #define VM_PAGE_DMA32_LIMIT     DECL_CONST(0x100000000, UL)
 #define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
 #define VM_PAGE_HIGHMEM_LIMIT   DECL_CONST(0x10000000000000, UL)
 #else /* __LP64__ */
 #define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x38000000, ULL)
 #ifdef X86_PAE
-#define VM_PAGE_MAX_SEGS        3
+#define VM_PAGE_MAX_ZONES       3
 #define VM_PAGE_HIGHMEM_LIMIT   DECL_CONST(0x10000000000000, ULL)
 #else /* X86_PAE */
-#define VM_PAGE_MAX_SEGS        3
+#define VM_PAGE_MAX_ZONES       3
 #define VM_PAGE_HIGHMEM_LIMIT   DECL_CONST(0xfffff000, UL)
 #endif /* X86_PAE */
 #endif /* __LP64__ */
 
 /*
- * Physical segment indexes.
+ * Physical zone indexes.
  */
-#define VM_PAGE_SEG_DMA         0
+#define VM_PAGE_ZONE_DMA        0
 
 #ifdef __LP64__
-#define VM_PAGE_SEG_DMA32       1
-#define VM_PAGE_SEG_DIRECTMAP   2
-#define VM_PAGE_SEG_HIGHMEM     3
+#define VM_PAGE_ZONE_DMA32      1
+#define VM_PAGE_ZONE_DIRECTMAP  2
+#define VM_PAGE_ZONE_HIGHMEM    3
 #else /* __LP64__ */
-#define VM_PAGE_SEG_DMA32       1 /* Alias for the DIRECTMAP segment */
-#define VM_PAGE_SEG_DIRECTMAP   1
-#define VM_PAGE_SEG_HIGHMEM     2
+#define VM_PAGE_ZONE_DMA32      1 /* Alias for the DIRECTMAP zone */
+#define VM_PAGE_ZONE_DIRECTMAP  1
+#define VM_PAGE_ZONE_HIGHMEM    2
 #endif /* __LP64__ */
 
 #endif /* _X86_PARAM_H */
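
Note the 32-bit branch of param.h: VM_PAGE_ZONE_DMA32 is deliberately given the same value as VM_PAGE_ZONE_DIRECTMAP, so a DMA32 request transparently resolves to the direct physical mapping. A minimal compile-time check of that aliasing, assuming the definitions above are in scope (illustrative only, C11 static_assert):

    #include <assert.h>

    #ifndef __LP64__
    /* On 32-bit builds, DMA32 is an alias for DIRECTMAP and only three
     * zone indexes exist. */
    static_assert(VM_PAGE_ZONE_DMA32 == VM_PAGE_ZONE_DIRECTMAP,
                  "DMA32 must alias DIRECTMAP on 32-bit builds");
    static_assert(VM_PAGE_MAX_ZONES == 3, "DMA, DIRECTMAP, HIGHMEM");
    #else
    /* On 64-bit builds, all four zones are distinct. */
    static_assert(VM_PAGE_MAX_ZONES == 4, "DMA, DMA32, DIRECTMAP, HIGHMEM");
    #endif
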
diff --git a/vm/vm_page.c b/vm/vm_page.c
index daad01fa..19f74a40 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -51,13 +51,13 @@
 #define DEBUG 0
 
 /*
- * Number of free block lists per segment.
+ * Number of free block lists per zone.
  */
 #define VM_PAGE_NR_FREE_LISTS 11
 
 /*
  * The size of a CPU pool is computed by dividing the number of pages in its
- * containing segment by this value.
+ * containing zone by this value.
  */
 #define VM_PAGE_CPU_POOL_RATIO 1024
 
@@ -98,14 +98,14 @@ struct vm_page_free_list {
 };
 
 /*
- * Segment name buffer size.
+ * Zone name buffer size.
  */
 #define VM_PAGE_NAME_SIZE 16
 
 /*
- * Segment of contiguous memory.
+ * Zone of contiguous memory.
  */
-struct vm_page_seg {
+struct vm_page_zone {
     struct vm_page_cpu_pool cpu_pools[MAX_CPUS];
 
     phys_addr_t start;
@@ -118,9 +118,9 @@
 };
 
 /*
- * Bootstrap information about a segment.
+ * Bootstrap information about a zone.
  */
-struct vm_page_boot_seg {
+struct vm_page_boot_zone {
     phys_addr_t start;
     phys_addr_t end;
     bool heap_present;
@@ -131,39 +131,40 @@
 static int vm_page_is_ready __read_mostly;
 
 /*
- * Segment table.
+ * Zone table.
  *
- * The system supports a maximum of 4 segments :
+ * The system supports a maximum of 4 zones :
  *  - DMA: suitable for DMA
  *  - DMA32: suitable for DMA when devices support 32-bits addressing
  *  - DIRECTMAP: direct physical mapping, allows direct access from
  *    the kernel with a simple offset translation
  *  - HIGHMEM: must be mapped before it can be accessed
  *
- * Segments are ordered by priority, 0 being the lowest priority. Their
- * relative priorities are DMA < DMA32 < DIRECTMAP < HIGHMEM. Some segments
+ * Zones are ordered by priority, 0 being the lowest priority. Their
+ * relative priorities are DMA < DMA32 < DIRECTMAP < HIGHMEM. Some zones
  * may actually be aliases for others, e.g. if DMA is always possible from
  * the direct physical mapping, DMA and DMA32 are aliases for DIRECTMAP,
- * in which case the segment table contains DIRECTMAP and HIGHMEM only.
+ * in which case the zone table contains DIRECTMAP and HIGHMEM only.
  */
-static struct vm_page_seg vm_page_segs[VM_PAGE_MAX_SEGS];
+static struct vm_page_zone vm_page_zones[VM_PAGE_MAX_ZONES];
 
 /*
- * Bootstrap segment table.
+ * Bootstrap zone table.
  */
-static struct vm_page_boot_seg vm_page_boot_segs[VM_PAGE_MAX_SEGS] __initdata;
+static struct vm_page_boot_zone vm_page_boot_zones[VM_PAGE_MAX_ZONES]
+    __initdata;
 
 /*
- * Number of loaded segments.
+ * Number of loaded zones.
  */
-static unsigned int vm_page_segs_size __read_mostly;
+static unsigned int vm_page_zones_size __read_mostly;
 
 static void __init
-vm_page_init(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
+vm_page_init(struct vm_page *page, unsigned short zone_index, phys_addr_t pa)
 {
     memset(page, 0, sizeof(*page));
     page->type = VM_PAGE_RESERVED;
-    page->seg_index = seg_index;
+    page->zone_index = zone_index;
     page->order = VM_PAGE_ORDER_UNLISTED;
     page->phys_addr = pa;
 }
@@ -208,7 +209,7 @@ vm_page_free_list_remove(struct vm_page_free_list *free_list,
 }
 
 static struct vm_page *
-vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
+vm_page_zone_alloc_from_buddy(struct vm_page_zone *zone, unsigned int order)
 {
     struct vm_page_free_list *free_list = free_list;
     struct vm_page *page, *buddy;
@@ -217,7 +218,7 @@
     assert(order < VM_PAGE_NR_FREE_LISTS);
 
     for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
-        free_list = &seg->free_lists[i];
+        free_list = &zone->free_lists[i];
 
         if (free_list->size != 0) {
             break;
@@ -235,24 +236,24 @@
     while (i > order) {
         i--;
         buddy = &page[1 << i];
-        vm_page_free_list_insert(&seg->free_lists[i], buddy);
+        vm_page_free_list_insert(&zone->free_lists[i], buddy);
         buddy->order = i;
     }
 
-    seg->nr_free_pages -= (1 << order);
+    zone->nr_free_pages -= (1 << order);
     return page;
 }
 
 static void
-vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
-                          unsigned int order)
+vm_page_zone_free_to_buddy(struct vm_page_zone *zone, struct vm_page *page,
+                           unsigned int order)
 {
     struct vm_page *buddy;
     phys_addr_t pa, buddy_pa;
     unsigned int nr_pages;
 
-    assert(page >= seg->pages);
-    assert(page < seg->pages_end);
+    assert(page >= zone->pages);
+    assert(page < zone->pages_end);
     assert(page->order == VM_PAGE_ORDER_UNLISTED);
     assert(order < VM_PAGE_NR_FREE_LISTS);
 
@@ -262,26 +263,26 @@
     while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
         buddy_pa = pa ^ vm_page_ptoa(1 << order);
 
-        if ((buddy_pa < seg->start) || (buddy_pa >= seg->end)) {
+        if ((buddy_pa < zone->start) || (buddy_pa >= zone->end)) {
             break;
         }
 
-        buddy = &seg->pages[vm_page_atop(buddy_pa - seg->start)];
+        buddy = &zone->pages[vm_page_atop(buddy_pa - zone->start)];
 
         if (buddy->order != order) {
            break;
         }
 
-        vm_page_free_list_remove(&seg->free_lists[order], buddy);
+        vm_page_free_list_remove(&zone->free_lists[order], buddy);
         buddy->order = VM_PAGE_ORDER_UNLISTED;
         order++;
         pa &= -vm_page_ptoa(1 << order);
-        page = &seg->pages[vm_page_atop(pa - seg->start)];
+        page = &zone->pages[vm_page_atop(pa - zone->start)];
     }
 
-    vm_page_free_list_insert(&seg->free_lists[order], page);
+    vm_page_free_list_insert(&zone->free_lists[order], page);
     page->order = order;
-    seg->nr_free_pages += nr_pages;
+    zone->nr_free_pages += nr_pages;
 }
 
 static void __init
@@ -296,9 +297,9 @@ vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
 }
 
 static inline struct vm_page_cpu_pool *
-vm_page_cpu_pool_get(struct vm_page_seg *seg)
+vm_page_cpu_pool_get(struct vm_page_zone *zone)
 {
-    return &seg->cpu_pools[cpu_id()];
+    return &zone->cpu_pools[cpu_id()];
 }
 
 static inline struct vm_page *
@@ -323,17 +324,17 @@ vm_page_cpu_pool_push(struct vm_page_cpu_pool *cpu_pool, struct vm_page *page)
 
 static int
 vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
-                      struct vm_page_seg *seg)
+                      struct vm_page_zone *zone)
 {
     struct vm_page *page;
     int i;
 
     assert(cpu_pool->nr_pages == 0);
 
-    mutex_lock(&seg->lock);
+    mutex_lock(&zone->lock);
 
     for (i = 0; i < cpu_pool->transfer_size; i++) {
-        page = vm_page_seg_alloc_from_buddy(seg, 0);
+        page = vm_page_zone_alloc_from_buddy(zone, 0);
 
         if (page == NULL) {
             break;
@@ -342,42 +343,42 @@ vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
         vm_page_cpu_pool_push(cpu_pool, page);
     }
 
-    mutex_unlock(&seg->lock);
+    mutex_unlock(&zone->lock);
     return i;
 }
 
 static void
 vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
-                       struct vm_page_seg *seg)
+                       struct vm_page_zone *zone)
 {
     struct vm_page *page;
     int i;
 
     assert(cpu_pool->nr_pages == cpu_pool->size);
 
-    mutex_lock(&seg->lock);
+    mutex_lock(&zone->lock);
 
     for (i = cpu_pool->transfer_size; i > 0; i--) {
         page = vm_page_cpu_pool_pop(cpu_pool);
-        vm_page_seg_free_to_buddy(seg, page, 0);
+        vm_page_zone_free_to_buddy(zone, page, 0);
     }
 
-    mutex_unlock(&seg->lock);
+    mutex_unlock(&zone->lock);
 }
 
 static phys_addr_t __init
-vm_page_seg_size(struct vm_page_seg *seg)
+vm_page_zone_size(struct vm_page_zone *zone)
 {
-    return seg->end - seg->start;
+    return zone->end - zone->start;
 }
 
 static int __init
-vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
+vm_page_zone_compute_pool_size(struct vm_page_zone *zone)
 {
     phys_addr_t size;
 
-    size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
+    size = vm_page_atop(vm_page_zone_size(zone)) / VM_PAGE_CPU_POOL_RATIO;
 
     if (size == 0) {
         size = 1;
@@ -389,40 +390,40 @@
 static void __init
-vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
-                 struct vm_page *pages)
+vm_page_zone_init(struct vm_page_zone *zone, phys_addr_t start, phys_addr_t end,
+                  struct vm_page *pages)
 {
     phys_addr_t pa;
     int pool_size;
     unsigned int i;
 
-    seg->start = start;
-    seg->end = end;
-    pool_size = vm_page_seg_compute_pool_size(seg);
+    zone->start = start;
+    zone->end = end;
+    pool_size = vm_page_zone_compute_pool_size(zone);
 
-    for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++) {
-        vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+    for (i = 0; i < ARRAY_SIZE(zone->cpu_pools); i++) {
+        vm_page_cpu_pool_init(&zone->cpu_pools[i], pool_size);
     }
 
-    seg->pages = pages;
-    seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
-    mutex_init(&seg->lock);
+    zone->pages = pages;
+    zone->pages_end = pages + vm_page_atop(vm_page_zone_size(zone));
+    mutex_init(&zone->lock);
 
-    for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++) {
-        vm_page_free_list_init(&seg->free_lists[i]);
+    for (i = 0; i < ARRAY_SIZE(zone->free_lists); i++) {
+        vm_page_free_list_init(&zone->free_lists[i]);
     }
 
-    seg->nr_free_pages = 0;
-    i = seg - vm_page_segs;
+    zone->nr_free_pages = 0;
+    i = zone - vm_page_zones;
 
-    for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE) {
-        vm_page_init(&pages[vm_page_atop(pa - seg->start)], i, pa);
+    for (pa = zone->start; pa < zone->end; pa += PAGE_SIZE) {
+        vm_page_init(&pages[vm_page_atop(pa - zone->start)], i, pa);
     }
 }
 
 static struct vm_page *
-vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
-                  unsigned short type)
+vm_page_zone_alloc(struct vm_page_zone *zone, unsigned int order,
+                   unsigned short type)
 {
     struct vm_page_cpu_pool *cpu_pool;
     struct vm_page *page;
@@ -432,11 +433,11 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
 
     if (order == 0) {
         thread_pin();
-        cpu_pool = vm_page_cpu_pool_get(seg);
+        cpu_pool = vm_page_cpu_pool_get(zone);
         mutex_lock(&cpu_pool->lock);
 
         if (cpu_pool->nr_pages == 0) {
-            filled = vm_page_cpu_pool_fill(cpu_pool, seg);
+            filled = vm_page_cpu_pool_fill(cpu_pool, zone);
 
             if (!filled) {
                 mutex_unlock(&cpu_pool->lock);
@@ -449,9 +450,9 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
         mutex_unlock(&cpu_pool->lock);
         thread_unpin();
     } else {
-        mutex_lock(&seg->lock);
-        page = vm_page_seg_alloc_from_buddy(seg, order);
-        mutex_unlock(&seg->lock);
+        mutex_lock(&zone->lock);
+        page = vm_page_zone_alloc_from_buddy(zone, order);
+        mutex_unlock(&zone->lock);
 
         if (page == NULL) {
             return NULL;
@@ -464,8 +465,8 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
 }
 
 static void
-vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
-                 unsigned int order)
+vm_page_zone_free(struct vm_page_zone *zone, struct vm_page *page,
+                  unsigned int order)
 {
     struct vm_page_cpu_pool *cpu_pool;
 
@@ -476,69 +477,69 @@ vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
     if (order == 0) {
         thread_pin();
-        cpu_pool = vm_page_cpu_pool_get(seg);
+        cpu_pool = vm_page_cpu_pool_get(zone);
         mutex_lock(&cpu_pool->lock);
 
         if (cpu_pool->nr_pages == cpu_pool->size) {
-            vm_page_cpu_pool_drain(cpu_pool, seg);
+            vm_page_cpu_pool_drain(cpu_pool, zone);
         }
 
         vm_page_cpu_pool_push(cpu_pool, page);
         mutex_unlock(&cpu_pool->lock);
         thread_unpin();
     } else {
-        mutex_lock(&seg->lock);
-        vm_page_seg_free_to_buddy(seg, page, order);
-        mutex_unlock(&seg->lock);
+        mutex_lock(&zone->lock);
+        vm_page_zone_free_to_buddy(zone, page, order);
+        mutex_unlock(&zone->lock);
     }
 }
 
 void __init
-vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+vm_page_load(unsigned int zone_index, phys_addr_t start, phys_addr_t end)
 {
-    struct vm_page_boot_seg *seg;
+    struct vm_page_boot_zone *zone;
 
-    assert(seg_index < ARRAY_SIZE(vm_page_boot_segs));
+    assert(zone_index < ARRAY_SIZE(vm_page_boot_zones));
     assert(vm_page_aligned(start));
     assert(vm_page_aligned(end));
     assert(start < end);
-    assert(vm_page_segs_size < ARRAY_SIZE(vm_page_boot_segs));
+    assert(vm_page_zones_size < ARRAY_SIZE(vm_page_boot_zones));
 
-    seg = &vm_page_boot_segs[seg_index];
-    seg->start = start;
-    seg->end = end;
-    seg->heap_present = false;
+    zone = &vm_page_boot_zones[zone_index];
+    zone->start = start;
+    zone->end = end;
+    zone->heap_present = false;
 
 #if DEBUG
     printk("vm_page: load: %s: %llx:%llx\n",
-           vm_page_seg_name(seg_index),
+           vm_page_zone_name(zone_index),
           (unsigned long long)start, (unsigned long long)end);
 #endif
 
-    vm_page_segs_size++;
+    vm_page_zones_size++;
 }
 
 void
-vm_page_load_heap(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+vm_page_load_heap(unsigned int zone_index, phys_addr_t start, phys_addr_t end)
 {
-    struct vm_page_boot_seg *seg;
+    struct vm_page_boot_zone *zone;
 
-    assert(seg_index < ARRAY_SIZE(vm_page_boot_segs));
+    assert(zone_index < ARRAY_SIZE(vm_page_boot_zones));
     assert(vm_page_aligned(start));
     assert(vm_page_aligned(end));
 
-    seg = &vm_page_boot_segs[seg_index];
+    zone = &vm_page_boot_zones[zone_index];
 
-    assert(seg->start <= start);
-    assert(end <= seg-> end);
+    assert(zone->start <= start);
+    assert(end <= zone-> end);
 
-    seg->avail_start = start;
-    seg->avail_end = end;
-    seg->heap_present = true;
+    zone->avail_start = start;
+    zone->avail_end = end;
+    zone->heap_present = true;
 
 #if DEBUG
     printk("vm_page: heap: %s: %llx:%llx\n",
-           vm_page_seg_name(seg_index),
+           vm_page_zone_name(zone_index),
           (unsigned long long)start, (unsigned long long)end);
 #endif
 }
@@ -550,88 +551,88 @@ vm_page_ready(void)
 }
 
 static unsigned int
-vm_page_select_alloc_seg(unsigned int selector)
+vm_page_select_alloc_zone(unsigned int selector)
 {
-    unsigned int seg_index;
+    unsigned int zone_index;
 
     switch (selector) {
     case VM_PAGE_SEL_DMA:
-        seg_index = VM_PAGE_SEG_DMA;
+        zone_index = VM_PAGE_ZONE_DMA;
         break;
     case VM_PAGE_SEL_DMA32:
-        seg_index = VM_PAGE_SEG_DMA32;
+        zone_index = VM_PAGE_ZONE_DMA32;
         break;
     case VM_PAGE_SEL_DIRECTMAP:
-        seg_index = VM_PAGE_SEG_DIRECTMAP;
+        zone_index = VM_PAGE_ZONE_DIRECTMAP;
         break;
     case VM_PAGE_SEL_HIGHMEM:
-        seg_index = VM_PAGE_SEG_HIGHMEM;
+        zone_index = VM_PAGE_ZONE_HIGHMEM;
        break;
     default:
        panic("vm_page: invalid selector");
    }
 
-    return MIN(vm_page_segs_size - 1, seg_index);
+    return MIN(vm_page_zones_size - 1, zone_index);
 }
 
 static int __init
-vm_page_boot_seg_loaded(const struct vm_page_boot_seg *seg)
+vm_page_boot_zone_loaded(const struct vm_page_boot_zone *zone)
 {
-    return (seg->end != 0);
+    return (zone->end != 0);
 }
 
 static void __init
-vm_page_check_boot_segs(void)
+vm_page_check_boot_zones(void)
 {
     unsigned int i;
     int expect_loaded;
 
-    if (vm_page_segs_size == 0) {
+    if (vm_page_zones_size == 0) {
         panic("vm_page: no physical memory loaded");
     }
 
-    for (i = 0; i < ARRAY_SIZE(vm_page_boot_segs); i++) {
-        expect_loaded = (i < vm_page_segs_size);
+    for (i = 0; i < ARRAY_SIZE(vm_page_boot_zones); i++) {
+        expect_loaded = (i < vm_page_zones_size);
 
-        if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded) {
+        if (vm_page_boot_zone_loaded(&vm_page_boot_zones[i]) == expect_loaded) {
             continue;
         }
 
-        panic("vm_page: invalid boot segment table");
+        panic("vm_page: invalid boot zone table");
     }
 }
 
 static phys_addr_t __init
-vm_page_boot_seg_size(struct vm_page_boot_seg *seg)
+vm_page_boot_zone_size(struct vm_page_boot_zone *zone)
 {
-    return seg->end - seg->start;
+    return zone->end - zone->start;
 }
 
 static phys_addr_t __init
-vm_page_boot_seg_avail_size(struct vm_page_boot_seg *seg)
+vm_page_boot_zone_avail_size(struct vm_page_boot_zone *zone)
 {
-    return seg->avail_end - seg->avail_start;
+    return zone->avail_end - zone->avail_start;
 }
 
 static void * __init
 vm_page_bootalloc(size_t size)
 {
-    struct vm_page_boot_seg *seg;
+    struct vm_page_boot_zone *zone;
     phys_addr_t pa;
     unsigned int i;
 
-    for (i = vm_page_select_alloc_seg(VM_PAGE_SEL_DIRECTMAP);
-         i < vm_page_segs_size;
+    for (i = vm_page_select_alloc_zone(VM_PAGE_SEL_DIRECTMAP);
+         i < vm_page_zones_size;
          i--) {
-        seg = &vm_page_boot_segs[i];
+        zone = &vm_page_boot_zones[i];
 
-        if (!seg->heap_present) {
+        if (!zone->heap_present) {
             continue;
         }
 
-        if (size <= vm_page_boot_seg_avail_size(seg)) {
-            pa = seg->avail_start;
-            seg->avail_start += vm_page_round(size);
+        if (size <= vm_page_boot_zone_avail_size(zone)) {
+            pa = zone->avail_start;
+            zone->avail_start += vm_page_round(size);
             return (void *)vm_page_direct_va(pa);
         }
     }
@@ -642,23 +643,23 @@
 void __init
 vm_page_setup(void)
 {
-    struct vm_page_boot_seg *boot_seg;
-    struct vm_page_seg *seg;
+    struct vm_page_boot_zone *boot_zone;
+    struct vm_page_zone *zone;
     struct vm_page *table, *page, *end;
     size_t nr_pages, table_size;
     unsigned long va;
     unsigned int i;
     phys_addr_t pa;
 
-    vm_page_check_boot_segs();
+    vm_page_check_boot_zones();
 
     /*
     * Compute the page table size.
     */
     nr_pages = 0;
 
-    for (i = 0; i < vm_page_segs_size; i++) {
-        nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+    for (i = 0; i < vm_page_zones_size; i++) {
+        nr_pages += vm_page_atop(vm_page_boot_zone_size(&vm_page_boot_zones[i]));
     }
 
     table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
@@ -668,26 +669,26 @@
     va = (unsigned long)table;
 
     /*
-     * Initialize the segments, associating them to the page table. When
-     * the segments are initialized, all their pages are set allocated.
+     * Initialize the zones, associating them to the page table. When
+     * the zones are initialized, all their pages are set allocated.
      * Pages are then released, which populates the free lists.
     */
-    for (i = 0; i < vm_page_segs_size; i++) {
-        seg = &vm_page_segs[i];
-        boot_seg = &vm_page_boot_segs[i];
-        vm_page_seg_init(seg, boot_seg->start, boot_seg->end, table);
-        page = seg->pages + vm_page_atop(boot_seg->avail_start
-                                         - boot_seg->start);
-        end = seg->pages + vm_page_atop(boot_seg->avail_end
-                                        - boot_seg->start);
+    for (i = 0; i < vm_page_zones_size; i++) {
+        zone = &vm_page_zones[i];
+        boot_zone = &vm_page_boot_zones[i];
+        vm_page_zone_init(zone, boot_zone->start, boot_zone->end, table);
+        page = zone->pages + vm_page_atop(boot_zone->avail_start
+                                          - boot_zone->start);
+        end = zone->pages + vm_page_atop(boot_zone->avail_end
+                                         - boot_zone->start);
 
         while (page < end) {
             page->type = VM_PAGE_FREE;
-            vm_page_seg_free_to_buddy(seg, page, 0);
+            vm_page_zone_free_to_buddy(zone, page, 0);
             page++;
         }
 
-        table += vm_page_atop(vm_page_seg_size(seg));
+        table += vm_page_atop(vm_page_zone_size(zone));
     }
 
     while (va < (unsigned long)table) {
@@ -704,24 +705,24 @@
 void __init
 vm_page_manage(struct vm_page *page)
 {
-    assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+    assert(page->zone_index < ARRAY_SIZE(vm_page_zones));
     assert(page->type == VM_PAGE_RESERVED);
 
     vm_page_set_type(page, 0, VM_PAGE_FREE);
-    vm_page_seg_free_to_buddy(&vm_page_segs[page->seg_index], page, 0);
+    vm_page_zone_free_to_buddy(&vm_page_zones[page->zone_index], page, 0);
 }
 
 struct vm_page *
 vm_page_lookup(phys_addr_t pa)
 {
-    struct vm_page_seg *seg;
+    struct vm_page_zone *zone;
     unsigned int i;
 
-    for (i = 0; i < vm_page_segs_size; i++) {
-        seg = &vm_page_segs[i];
+    for (i = 0; i < vm_page_zones_size; i++) {
+        zone = &vm_page_zones[i];
 
-        if ((pa >= seg->start) && (pa < seg->end)) {
-            return &seg->pages[vm_page_atop(pa - seg->start)];
+        if ((pa >= zone->start) && (pa < zone->end)) {
+            return &zone->pages[vm_page_atop(pa - zone->start)];
         }
     }
 
@@ -734,8 +735,8 @@ vm_page_alloc(unsigned int order, unsigned int selector, unsigned short type)
     struct vm_page *page;
     unsigned int i;
 
-    for (i = vm_page_select_alloc_seg(selector); i < vm_page_segs_size; i--) {
-        page = vm_page_seg_alloc(&vm_page_segs[i], order, type);
+    for (i = vm_page_select_alloc_zone(selector); i < vm_page_zones_size; i--) {
+        page = vm_page_zone_alloc(&vm_page_zones[i], order, type);
 
         if (page != NULL) {
             return page;
@@ -748,40 +749,40 @@
 void
 vm_page_free(struct vm_page *page, unsigned int order)
 {
-    assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+    assert(page->zone_index < ARRAY_SIZE(vm_page_zones));
 
-    vm_page_seg_free(&vm_page_segs[page->seg_index], page, order);
+    vm_page_zone_free(&vm_page_zones[page->zone_index], page, order);
 }
 
 const char *
-vm_page_seg_name(unsigned int seg_index)
+vm_page_zone_name(unsigned int zone_index)
 {
-    /* Don't use a switch statement since segments can be aliased */
-    if (seg_index == VM_PAGE_SEG_HIGHMEM) {
+    /* Don't use a switch statement since zones can be aliased */
+    if (zone_index == VM_PAGE_ZONE_HIGHMEM) {
         return "HIGHMEM";
-    } else if (seg_index == VM_PAGE_SEG_DIRECTMAP) {
+    } else if (zone_index == VM_PAGE_ZONE_DIRECTMAP) {
         return "DIRECTMAP";
-    } else if (seg_index == VM_PAGE_SEG_DMA32) {
+    } else if (zone_index == VM_PAGE_ZONE_DMA32) {
         return "DMA32";
-    } else if (seg_index == VM_PAGE_SEG_DMA) {
+    } else if (zone_index == VM_PAGE_ZONE_DMA) {
         return "DMA";
     } else {
-        panic("vm_page: invalid segment index");
+        panic("vm_page: invalid zone index");
     }
 }
 
 void
 vm_page_info(void)
 {
-    struct vm_page_seg *seg;
+    struct vm_page_zone *zone;
     unsigned long pages;
     unsigned int i;
 
-    for (i = 0; i < vm_page_segs_size; i++) {
-        seg = &vm_page_segs[i];
-        pages = (unsigned long)(seg->pages_end - seg->pages);
+    for (i = 0; i < vm_page_zones_size; i++) {
+        zone = &vm_page_zones[i];
+        pages = (unsigned long)(zone->pages_end - zone->pages);
         printk("vm_page: %s: pages: %lu (%luM), free: %lu (%luM)\n",
-               vm_page_seg_name(i), pages, pages >> (20 - PAGE_SHIFT),
-               seg->nr_free_pages, seg->nr_free_pages >> (20 - PAGE_SHIFT));
+               vm_page_zone_name(i), pages, pages >> (20 - PAGE_SHIFT),
+               zone->nr_free_pages, zone->nr_free_pages >> (20 - PAGE_SHIFT));
     }
 }
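
The core of vm_page_zone_free_to_buddy() is the classic binary buddy invariant: the merge partner of a 2^order-page block is found by flipping the block-size bit of its physical address, buddy_pa = pa ^ vm_page_ptoa(1 << order), and a merged block always starts at the lower of the two addresses. A stand-alone illustration with stand-in macros (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define vm_page_ptoa(nr_pages) ((uint64_t)(nr_pages) << PAGE_SHIFT)

    int
    main(void)
    {
        /* A free block of 2^2 = 4 pages starting at 16 KiB. */
        uint64_t pa = 0x4000;
        unsigned int order = 2;

        /* Flipping the block-size bit yields the only possible merge
         * partner; the kernel then checks that this partner lies inside
         * the zone and is itself a free block of the same order. */
        uint64_t buddy_pa = pa ^ vm_page_ptoa(1 << order);

        /* After a merge, the combined block starts at the lower of the
         * two addresses, which is what pa &= -vm_page_ptoa(1 << order)
         * computes (after order++) in vm_page_zone_free_to_buddy(). */
        uint64_t merged_pa = pa & ~(vm_page_ptoa(1 << (order + 1)) - 1);

        printf("block %#llx order %u -> buddy %#llx, merged start %#llx\n",
               (unsigned long long)pa, order,
               (unsigned long long)buddy_pa,
               (unsigned long long)merged_pa);
        return 0;
    }
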
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 1ce172f2..f2011b40 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -43,9 +43,9 @@
 #define vm_page_aligned(addr)   P2ALIGNED(addr, PAGE_SIZE)
 
 /*
- * Segment selectors.
+ * Zone selectors.
  *
- * Selector-to-segment-list translation table :
+ * Selector-to-zone-list translation table :
  * DMA          DMA
 * DMA32        DMA32 DMA
 * DIRECTMAP    DIRECTMAP DMA32 DMA
@@ -73,7 +73,7 @@ struct vm_page {
     struct list node;
     unsigned short type;
-    unsigned short seg_index;
+    unsigned short zone_index;
     unsigned short order;
     phys_addr_t phys_addr;
     void *priv;
@@ -139,17 +139,17 @@ vm_page_get_priv(const struct vm_page *page)
 /*
  * Load physical memory into the vm_page module at boot time.
  *
- * All addresses must be page-aligned. Segments can be loaded in any order.
+ * All addresses must be page-aligned. Zones can be loaded in any order.
  */
-void vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end);
+void vm_page_load(unsigned int zone_index, phys_addr_t start, phys_addr_t end);
 
 /*
  * Load available physical memory into the vm_page module at boot time.
  *
- * The segment referred to must have been loaded with vm_page_load
+ * The zone referred to must have been loaded with vm_page_load
  * before loading its heap.
  */
-void vm_page_load_heap(unsigned int seg_index, phys_addr_t start,
+void vm_page_load_heap(unsigned int zone_index, phys_addr_t start,
                        phys_addr_t end);
 
@@ -162,10 +162,10 @@ int vm_page_ready(void);
 /*
  * Set up the vm_page module.
  *
- * Architecture-specific code must have loaded segments before calling this
- * function. Segments must comply with the selector-to-segment-list table,
+ * Architecture-specific code must have loaded zones before calling this
+ * function. Zones must comply with the selector-to-zone-list table,
  * e.g. HIGHMEM is loaded if and only if DIRECTMAP, DMA32 and DMA are loaded,
- * notwithstanding segment aliasing.
+ * notwithstanding zone aliasing.
  *
  * Once this function returns, the vm_page module is ready, and normal
  * allocation functions can be used.
@@ -188,7 +188,7 @@ struct vm_page * vm_page_lookup(phys_addr_t pa);
 /*
  * Allocate a block of 2^order physical pages.
  *
- * The selector is used to determine the segments from which allocation can
+ * The selector is used to determine the zones from which allocation can
  * be attempted.
  */
 struct vm_page * vm_page_alloc(unsigned int order, unsigned int selector,
@@ -200,9 +200,9 @@ struct vm_page * vm_page_alloc(unsigned int order, unsigned int selector,
 void vm_page_free(struct vm_page *page, unsigned int order);
 
 /*
- * Return the name of the given segment.
+ * Return the name of the given zone.
  */
-const char * vm_page_seg_name(unsigned int seg_index);
+const char * vm_page_zone_name(unsigned int zone_index);
 
 /*
  * Display internal information about the module.
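
To make the renamed interface concrete, here is a hypothetical caller using only declarations visible in this diff: vm_page_alloc(), vm_page_free(), the VM_PAGE_SEL_* selectors, the renamed zone_index field and vm_page_zone_name(). VM_PAGE_KMEM is a stand-in page type and the include paths are assumptions:

    #include <kern/printk.h>
    #include <vm/vm_page.h>

    static void
    zone_alloc_example(void)
    {
        struct vm_page *page;

        /* Request a 2^0 = 1 page block. Allocation starts at the zone
         * selected by VM_PAGE_SEL_DIRECTMAP and, if that zone is
         * exhausted, falls back to the lower priority zones (DMA32,
         * then DMA), per the selector-to-zone-list table above. */
        page = vm_page_alloc(0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KMEM);

        if (page == NULL) {
            return;
        }

        printk("vm_page: got %#llx from zone %s\n",
               (unsigned long long)page->phys_addr,
               vm_page_zone_name(page->zone_index));

        vm_page_free(page, 0);
    }

The selector argument also degrades gracefully: vm_page_select_alloc_zone() clamps the result with MIN(vm_page_zones_size - 1, zone_index), so the same caller works on machines where no separate HIGHMEM or DMA32 zone was ever loaded.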