Diffstat (limited to 'i386/intel/pmap.c')
-rw-r--r--	i386/intel/pmap.c	262
1 file changed, 142 insertions(+), 120 deletions(-)
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 0b8ae903..490c1d95 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -63,7 +63,7 @@
 #include <kern/debug.h>
 #include <kern/printf.h>
 #include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/lock.h>
@@ -113,7 +113,7 @@ pv_entry_t	pv_head_table;		/* array of entries, one per page */
 /*
  *	pv_list entries are kept on a list that can only be accessed
  *	with the pmap system locked (at SPLVM, not in the cpus_active set).
- *	The list is refilled from the pv_list_zone if it becomes empty.
+ *	The list is refilled from the pv_list_cache if it becomes empty.
  */
 pv_entry_t	pv_free_list;		/* free list at SPLVM */
 decl_simple_lock_data(, pv_free_list_lock)
@@ -133,7 +133,7 @@ decl_simple_lock_data(, pv_free_list_lock)
 	simple_unlock(&pv_free_list_lock); \
 }
 
-zone_t		pv_list_zone;		/* zone of pv_entry structures */
+struct kmem_cache	pv_list_cache;	/* cache of pv_entry structures */
 
 /*
  *	Each entry in the pv_head_table is locked by a bit in the
@@ -155,9 +155,6 @@ boolean_t	pmap_initialized = FALSE;
 vm_offset_t	kernel_virtual_start;
 vm_offset_t	kernel_virtual_end;
 
-/* XXX stupid fixed limit - get rid */
-vm_size_t	morevm = 128 * 1024 * 1024;	/* VM space for kernel map */
-
 /*
  *	Index into pv_head table, its lock bits, and the modify/reference
  *	bits starting at phys_first_addr.
@@ -330,7 +327,7 @@ lock_data_t	pmap_system_lock;
 
 #define MAX_TBIS_SIZE	32		/* > this -> TBIA */ /* XXX */
 
-#ifdef	MACH_HYP
+#ifdef	MACH_PV_PAGETABLES
 #if 1
 #define INVALIDATE_TLB(pmap, s, e) hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL)
 #else
@@ -342,7 +339,7 @@ lock_data_t	pmap_system_lock;
 	hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL); \
     } while(0)
 #endif
-#else	/* MACH_HYP */
+#else	/* MACH_PV_PAGETABLES */
 #if 0
 /* It is hard to know when a TLB flush becomes less expensive than a bunch of
  * invlpgs.  But it surely is more expensive than just one invlpg.
  */
@@ -361,7 +358,7 @@ lock_data_t	pmap_system_lock;
 		flush_tlb(); \
 }
 #endif
-#endif	/* MACH_HYP */
+#endif	/* MACH_PV_PAGETABLES */
 
 #if	NCPUS > 1
@@ -403,7 +400,7 @@ struct pmap_update_list	cpu_update_list[NCPUS];
 struct pmap	kernel_pmap_store;
 pmap_t		kernel_pmap;
 
-struct zone	*pmap_zone;		/* zone of pmap structures */
+struct kmem_cache	pmap_cache;	/* cache of pmap structures */
 
 int		pmap_debug = 0;		/* flag for debugging prints */
@@ -531,10 +528,10 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
 	register pt_entry_t	template;
 	register pt_entry_t	*pte;
 	int			spl;
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	int n, i = 0;
 	struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 	template = pa_to_pte(start)
 		| INTEL_PTE_NCACHE|INTEL_PTE_WTHRU
@@ -549,7 +546,7 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
 		pte = pmap_pte(kernel_pmap, virt);
 		if (pte == PT_ENTRY_NULL)
 			panic("pmap_map_bd: Invalid kernel address\n");
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		update[i].ptr = kv_to_ma(pte);
 		update[i].val = pa_to_ma(template);
 		i++;
@@ -559,20 +556,20 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
 				panic("couldn't pmap_map_bd\n");
 			i = 0;
 		}
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		WRITE_PTE(pte, template)
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		pte_increment_pa(template);
 		virt += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	if (i > HYP_BATCH_MMU_UPDATES)
 		panic("overflowed array in pmap_map_bd");
 	hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
 	if (n != i)
 		panic("couldn't pmap_map_bd\n");
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 	PMAP_READ_UNLOCK(pmap, spl);
 	return(virt);
 }
@@ -620,8 +617,7 @@ void pmap_bootstrap()
 	 * and extends to a stupid arbitrary limit beyond that.
 	 */
 	kernel_virtual_start = phystokv(phys_last_addr);
-	kernel_virtual_end = phystokv(phys_last_addr) + morevm
-		+ (phys_last_addr - phys_first_addr);
+	kernel_virtual_end = phystokv(phys_last_addr) + VM_KERNEL_MAP_SIZE;
 
 	if (kernel_virtual_end < kernel_virtual_start
 			|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
@@ -653,7 +649,7 @@ void pmap_bootstrap()
 			kernel_pmap->dirbase[i] = 0;
 	}
 
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	/* We don't actually deal with the CR3 register content at all */
 	hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
 	/*
@@ -671,25 +667,27 @@ void pmap_bootstrap()
 	pt_entry_t *l1_map[NSUP_L1];
 	{
 		pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
-		int i;
+		vm_offset_t la;
 		int n_l1map;
+		for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS; la >= VM_MIN_KERNEL_ADDRESS; la += NPTES * PAGE_SIZE) {
 #ifdef	PAE
-		pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[0]);
+			pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)]);
 #else	/* PAE */
-		pt_entry_t *l2_map = base;
+			pt_entry_t *l2_map = base;
 #endif	/* PAE */
-		for (n_l1map = 0, i = lin2pdenum(VM_MIN_KERNEL_ADDRESS); i < NPTES; i++) {
-			if (!(l2_map[i] & INTEL_PTE_VALID)) {
+			/* Like lin2pdenum, but works with non-contiguous boot L3 */
+			l2_map += (la >> PDESHIFT) & PDEMASK;
+			if (!(*l2_map & INTEL_PTE_VALID)) {
 				struct mmu_update update;
 				int j, n;
 
 				l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
 				for (j = 0; j < NPTES; j++)
-					l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn((i - lin2pdenum(VM_MIN_KERNEL_ADDRESS)) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+					l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
 				pmap_set_page_readonly_init(l1_map[n_l1map]);
 				if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
 					panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
-				update.ptr = kv_to_ma(&l2_map[i]);
+				update.ptr = kv_to_ma(l2_map);
 				update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
 				hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
 				if (n != 1)
@@ -700,7 +698,7 @@ void pmap_bootstrap()
 			}
 		}
 	}
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 	/*
 	 * Allocate and set up the kernel page tables.
@@ -720,7 +718,7 @@ void pmap_bootstrap()
 	 * to allocate new kernel page tables later.
 	 * XX fix this
	 */
-	for (va = phystokv(phys_first_addr); va < kernel_virtual_end; )
+	for (va = phystokv(phys_first_addr); va >= phystokv(phys_first_addr) && va < kernel_virtual_end; )
 	{
 		pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
 		pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
@@ -738,24 +736,24 @@ void pmap_bootstrap()
 				WRITE_PTE(pte, 0);
 			}
 			else
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			if (va == (vm_offset_t) &hyp_shared_info)
 			{
 				*pte = boot_info.shared_info | INTEL_PTE_VALID | INTEL_PTE_WRITE;
 				va += INTEL_PGBYTES;
 			}
 			else
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 			{
 				extern char _start[], etext[];
 
 				if (((va >= (vm_offset_t) _start)
 				    && (va + INTEL_PGBYTES <= (vm_offset_t)etext))
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 				    || (va >= (vm_offset_t) boot_info.pt_base
 				    && (va + INTEL_PGBYTES <= (vm_offset_t) ptable + INTEL_PGBYTES))
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 				    )
 				{
 					WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
@@ -763,7 +761,7 @@ void pmap_bootstrap()
 				}
 				else
 				{
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 					/* Keep supplementary L1 pages read-only */
 					int i;
 					for (i = 0; i < NSUP_L1; i++)
@@ -773,7 +771,7 @@ void pmap_bootstrap()
 							break;
 						}
 					if (i == NSUP_L1)
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 						WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
 							| INTEL_PTE_VALID | INTEL_PTE_WRITE | global)
@@ -786,11 +784,11 @@ void pmap_bootstrap()
 				WRITE_PTE(pte, 0);
 				va += INTEL_PGBYTES;
 			}
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			pmap_set_page_readonly_init(ptable);
 			if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
 				panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		}
 	}
@@ -798,7 +796,7 @@ void pmap_bootstrap()
 	   soon after we return from here.  */
 }
 
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 /* These are only required because of Xen security policies */
 
 /* Set back a page read write */
@@ -830,26 +828,27 @@ void pmap_set_page_readonly(void *_vaddr) {
 }
 
 /* This needs to be called instead of pmap_set_page_readonly as long as RC3
- * still points to the bootstrap dirbase.  */
+ * still points to the bootstrap dirbase, to also fix the bootstrap table.  */
 void pmap_set_page_readonly_init(void *_vaddr) {
 	vm_offset_t vaddr = (vm_offset_t) _vaddr;
 #if PAE
 	pt_entry_t *pdpbase = (void*) boot_info.pt_base;
-	vm_offset_t dirbase = ptetokv(pdpbase[0]);
+	/* The bootstrap table does not necessarily use contiguous pages for the pde tables */
+	pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)]);
 #else
-	vm_offset_t dirbase = boot_info.pt_base;
+	pt_entry_t *dirbase = (void*) boot_info.pt_base;
 #endif
-	struct pmap linear_pmap = {
-		.dirbase = (void*) dirbase,
-	};
+	pt_entry_t *pte = &dirbase[lin2pdenum(vaddr) & PTEMASK];
 	/* Modify our future kernel map (can't use update_va_mapping for this)... */
-	if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID)
+	if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
 		if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
 			panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+	}
 	/* ... and the bootstrap map.  */
-	if (*pmap_pde(&linear_pmap, vaddr) & INTEL_PTE_VALID)
+	if (*pte & INTEL_PTE_VALID) {
 		if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
 			panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+	}
 }
 
 void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
@@ -872,7 +871,7 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
 #endif	/* PAE */
 	for (i = 0; i < NPTES; i++) {
 		pt_entry_t pde = dir[i];
-		unsigned long pfn = mfn_to_pfn(atop(pde));
+		unsigned long pfn = atop(pte_to_pa(pde));
 		void *pgt = (void*) phystokv(ptoa(pfn));
 		if (pde & INTEL_PTE_VALID)
 			hyp_free_page(pfn, pgt);
@@ -890,7 +889,7 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
 #endif	/* PAE */
 	hyp_free_page(atop(_kvtophys(base)), base);
 }
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 void pmap_virtual_space(startp, endp)
 	vm_offset_t *startp;
@@ -941,13 +940,13 @@ void pmap_init()
 	pmap_phys_attributes = (char *) addr;
 
 	/*
-	 *	Create the zone of physical maps,
+	 *	Create the cache of physical maps,
 	 *	and of the physical-to-virtual entries.
 	 */
 	s = (vm_size_t) sizeof(struct pmap);
-	pmap_zone = zinit(s, 0, 400*s, 4096, 0, "pmap"); /* XXX */
+	kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0);
 	s = (vm_size_t) sizeof(struct pv_entry);
-	pv_list_zone = zinit(s, 0, 10000*s, 4096, 0, "pv_list"); /* XXX */
+	kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0);
 
 #if	NCPUS > 1
 	/*
@@ -1013,7 +1012,7 @@ pmap_page_table_page_alloc()
 
 	/*
 	 *	We cannot allocate the pmap_object in pmap_init,
-	 *	because it is called before the zone package is up.
+	 *	because it is called before the cache package is up.
 	 *	Allocate it now if it is missing.
 	 */
 	if (pmap_object == VM_OBJECT_NULL)
@@ -1052,21 +1051,38 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
 	pt_entry_t *pte, *pdp;
 	vm_offset_t ptp;
 	pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT;
+
+	/* Add a ptp if none exist yet for this pte */
 	if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL) {
 		ptp = phystokv(pmap_page_table_page_alloc());
+#ifdef	MACH_PV_PAGETABLES
 		pmap_set_page_readonly((void*) ptp);
 		if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, pa_to_mfn(ptp)))
 			panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+#endif	/* MACH_PV_PAGETABLES */
 		pdp = pmap_pde(kernel_pmap, addr);
+
+#ifdef	MACH_PV_PAGETABLES
 		if (!hyp_mmu_update_pte(kv_to_ma(pdp),
 			pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID
					      | INTEL_PTE_USER
					      | INTEL_PTE_WRITE))
 			panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
+#else	/* MACH_PV_PAGETABLES */
+		*pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+					      | INTEL_PTE_USER
+					      | INTEL_PTE_WRITE;
+#endif	/* MACH_PV_PAGETABLES */
 		pte = pmap_pte(kernel_pmap, addr);
 	}
+
+#ifdef	MACH_PV_PAGETABLES
 	if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
 		panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
+#else	/* MACH_PV_PAGETABLES */
+	/* Note: in this case, mfn is actually a pfn.  */
+	WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#endif	/* MACH_PV_PAGETABLES */
 }
 #endif	/* MACH_XEN */
@@ -1117,11 +1133,11 @@ pmap_t pmap_create(size)
 	}
 
 	/*
-	 *	Allocate a pmap struct from the pmap_zone.  Then allocate
-	 *	the page descriptor table from the pd_zone.
+	 *	Allocate a pmap struct from the pmap_cache.  Then allocate
+	 *	the page descriptor table.
 	 */
 
-	p = (pmap_t) zalloc(pmap_zone);
+	p = (pmap_t) kmem_cache_alloc(&pmap_cache);
 	if (p == PMAP_NULL)
 		panic("pmap_create");
@@ -1131,13 +1147,19 @@ pmap_t pmap_create(size)
 		panic("pmap_create");
 
 	memcpy(p->dirbase, kernel_page_dir, PDPNUM * INTEL_PGBYTES);
-#ifdef	MACH_XEN
+#ifdef	LINUX_DEV
+#if VM_MIN_KERNEL_ADDRESS != 0
+	/* Do not map BIOS in user tasks */
+	p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] = 0;
+#endif
+#endif
+#ifdef	MACH_PV_PAGETABLES
 	{
 		int i;
 		for (i = 0; i < PDPNUM; i++)
 			pmap_set_page_readonly((void*) p->dirbase + i * INTEL_PGBYTES);
 	}
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 #if PAE
 	if (kmem_alloc_wired(kernel_map,
@@ -1149,9 +1171,9 @@ pmap_t pmap_create(size)
 		for (i = 0; i < PDPNUM; i++)
 			WRITE_PTE(&p->pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) p->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
 	}
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	pmap_set_page_readonly(p->pdpbase);
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 #endif	/* PAE */
 
 	p->ref_count = 1;
@@ -1211,32 +1233,32 @@ void pmap_destroy(p)
 			if (m == VM_PAGE_NULL)
 				panic("pmap_destroy: pte page not in object");
 			vm_page_lock_queues();
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
 				panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa));
 			pmap_set_page_readwrite((void*) phystokv(pa));
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 			vm_page_free(m);
 			inuse_ptepages_count--;
 			vm_page_unlock_queues();
 			vm_object_unlock(pmap_object);
 		}
 	}
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	{
 		int i;
 		for (i = 0; i < PDPNUM; i++)
 			pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES);
 	}
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 	kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
 #if PAE
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	pmap_set_page_readwrite(p->pdpbase);
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 	kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
 #endif	/* PAE */
-	zfree(pmap_zone, (vm_offset_t) p);
+	kmem_cache_free(&pmap_cache, (vm_offset_t) p);
 }
 
 /*
@@ -1279,10 +1301,10 @@ void pmap_remove_range(pmap, va, spte, epte)
 	int			num_removed, num_unwired;
 	int			pai;
 	vm_offset_t		pa;
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	int n, ii = 0;
 	struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 #if	DEBUG_PTE_PAGE
 	if (pmap != kernel_pmap)
@@ -1311,7 +1333,7 @@ void pmap_remove_range(pmap, va, spte, epte)
 		    register int	i = ptes_per_vm_page;
 		    register pt_entry_t	*lpte = cpte;
 		    do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			update[ii].ptr = kv_to_ma(lpte);
 			update[ii].val = 0;
 			ii++;
@@ -1321,9 +1343,9 @@ void pmap_remove_range(pmap, va, spte, epte)
 				panic("couldn't pmap_remove_range\n");
 			    ii = 0;
 			}
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 			*lpte = 0;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 			lpte++;
 		    } while (--i > 0);
 		    continue;
@@ -1344,7 +1366,7 @@ void pmap_remove_range(pmap, va, spte, epte)
 	    do {
 		pmap_phys_attributes[pai] |=
		    *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		update[ii].ptr = kv_to_ma(lpte);
 		update[ii].val = 0;
 		ii++;
@@ -1354,9 +1376,9 @@ void pmap_remove_range(pmap, va, spte, epte)
 			panic("couldn't pmap_remove_range\n");
 		    ii = 0;
 		}
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		*lpte = 0;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		lpte++;
 	    } while (--i > 0);
 	}
@@ -1402,13 +1424,13 @@ void pmap_remove_range(pmap, va, spte, epte)
 	    }
 	}
 
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	if (ii > HYP_BATCH_MMU_UPDATES)
 		panic("overflowed array in pmap_remove_range");
 	hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
 	if (n != ii)
 		panic("couldn't pmap_remove_range\n");
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 	/*
	 *	Update the counts
@@ -1554,12 +1576,12 @@ void pmap_page_protect(phys, prot)
 		    do {
 			pmap_phys_attributes[pai] |=
			    *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			if (!hyp_mmu_update_pte(kv_to_ma(pte++), 0))
 			    panic("%s:%d could not clear pte %p\n",__FILE__,__LINE__,pte-1);
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 			*pte++ = 0;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		    } while (--i > 0);
 		}
@@ -1589,12 +1611,12 @@ void pmap_page_protect(phys, prot)
 		    register int i = ptes_per_vm_page;
 
 		    do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			if (!hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WRITE))
-			    panic("%s:%d could not enable write on pte %p\n",__FILE__,__LINE__,pte);
-#else	/* MACH_XEN */
+			    panic("%s:%d could not disable write on pte %p\n",__FILE__,__LINE__,pte);
+#else	/* MACH_PV_PAGETABLES */
 			*pte &= ~INTEL_PTE_WRITE;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 			pte++;
 		    } while (--i > 0);
@@ -1683,14 +1705,14 @@ void pmap_protect(map, s, e, prot)
 		spte = &spte[ptenum(s)];
 		epte = &spte[intel_btop(l-s)];
 
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		int n, i = 0;
 		struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 
 		while (spte < epte) {
 		    if (*spte & INTEL_PTE_VALID) {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 			update[i].ptr = kv_to_ma(spte);
 			update[i].val = *spte & ~INTEL_PTE_WRITE;
 			i++;
@@ -1700,19 +1722,19 @@ void pmap_protect(map, s, e, prot)
 				panic("couldn't pmap_protect\n");
 			    i = 0;
 			}
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 			*spte &= ~INTEL_PTE_WRITE;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		    }
 		    spte++;
 		}
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		if (i > HYP_BATCH_MMU_UPDATES)
 			panic("overflowed array in pmap_protect");
 		hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
 		if (n != i)
 			panic("couldn't pmap_protect\n");
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 	    }
 	    s = l;
 	    pde++;
@@ -1751,7 +1773,7 @@ void pmap_enter(pmap, v, pa, prot, wired)
 	vm_offset_t		old_pa;
 
 	assert(pa != vm_page_fictitious_addr);
-if (pmap_debug) printf("pmap(%x, %x)\n", v, pa);
+if (pmap_debug) printf("pmap(%lx, %lx)\n", v, pa);
 	if (pmap == PMAP_NULL)
 		return;
@@ -1786,7 +1808,7 @@ if (pmap_debug) printf("pmap(%x, %x)\n", v, pa);
 
 	/*
	 *	Must allocate a new pvlist entry while we're unlocked;
-	 *	zalloc may cause pageout (which will lock the pmap system).
+	 *	Allocating may cause pageout (which will lock the pmap system).
 	 *	If we determine we need a pvlist entry, we will unlock
 	 *	and allocate one.  Then we will retry, throughing away
 	 *	the allocated entry later (if we no longer need it).
@@ -1849,7 +1871,7 @@ Retry:
 	    /*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
 	    pdp = pmap_pde(pmap, v);
 	    do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		pmap_set_page_readonly((void *) ptp);
 		if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
 			panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
@@ -1858,11 +1880,11 @@ Retry:
						| INTEL_PTE_USER
						| INTEL_PTE_WRITE))
 			panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
-#else	/* MACH_XEN */
-		*pdp = pa_to_pte(ptp) | INTEL_PTE_VALID
-				      | INTEL_PTE_USER
-				      | INTEL_PTE_WRITE;
-#endif	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
+		*pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+					      | INTEL_PTE_USER
+					      | INTEL_PTE_WRITE;
+#endif	/* MACH_PV_PAGETABLES */
 		pdp++;
 		ptp += INTEL_PGBYTES;
 	    } while (--i > 0);
@@ -1902,12 +1924,12 @@ Retry:
 	    do {
 		if (*pte & INTEL_PTE_MOD)
 		    template |= INTEL_PTE_MOD;
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		if (!hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template)))
 			panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		WRITE_PTE(pte, template)
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		pte++;
 		pte_increment_pa(template);
 	    } while (--i > 0);
@@ -1970,9 +1992,9 @@ Retry:
 	    PMAP_READ_UNLOCK(pmap, spl);
 
 	    /*
-	     *	Refill from zone.
+	     *	Refill from cache.
	     */
-	    pv_e = (pv_entry_t) zalloc(pv_list_zone);
+	    pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
 	    goto Retry;
 	}
 	}
@@ -2012,12 +2034,12 @@ Retry:
 		template |= INTEL_PTE_WIRED;
 	    i = ptes_per_vm_page;
 	    do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		if (!(hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template))))
 			panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		WRITE_PTE(pte, template)
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		pte++;
 		pte_increment_pa(template);
 	    } while (--i > 0);
@@ -2072,12 +2094,12 @@ void pmap_change_wiring(map, v, wired)
 	    map->stats.wired_count--;
 	    i = ptes_per_vm_page;
 	    do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WIRED)))
 			panic("%s:%d could not wire down pte %p\n",__FILE__,__LINE__,pte);
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		*pte &= ~INTEL_PTE_WIRED;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		pte++;
 	    } while (--i > 0);
 	}
@@ -2208,7 +2230,7 @@ void pmap_collect(p)
 		register int i = ptes_per_vm_page;
 		register pt_entry_t *pdep = pdp;
 		do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		    unsigned long pte = *pdep;
 		    void *ptable = (void*) ptetokv(pte);
 		    if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
@@ -2216,9 +2238,9 @@ void pmap_collect(p)
 		    if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
 			panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
 		    pmap_set_page_readwrite(ptable);
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		    *pdep++ = 0;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		} while (--i > 0);
 	    }
@@ -2435,12 +2457,12 @@ phys_attribute_clear(phys, bits)
 	    {
 		register int i = ptes_per_vm_page;
 		do {
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 		    if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
 			panic("%s:%d could not clear bits %lx from pte %p\n",__FILE__,__LINE__,bits,pte);
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 		    *pte &= ~bits;
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 		} while (--i > 0);
 	    }
 	    PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
@@ -2790,7 +2812,7 @@ void pmap_update_interrupt()
 }
 #endif	/* NCPUS > 1 */
 
-#ifdef i386
+#if defined(__i386__)
 /* Unmap page 0 to trap NULL references.  */
 void
 pmap_unmap_page_zero ()
@@ -2801,12 +2823,12 @@ pmap_unmap_page_zero ()
 	if (!pte)
 		return;
 	assert (pte);
-#ifdef	MACH_XEN
+#ifdef	MACH_PV_PAGETABLES
 	if (!hyp_mmu_update_pte(kv_to_ma(pte), 0))
 		printf("couldn't unmap page 0\n");
-#else	/* MACH_XEN */
+#else	/* MACH_PV_PAGETABLES */
 	*pte = 0;
 	INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE);
-#endif	/* MACH_XEN */
+#endif	/* MACH_PV_PAGETABLES */
 }
-#endif /* i386 */
+#endif /* __i386__ */
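The bulk of this diff is the zone-to-slab conversion: each zinit/zalloc/zfree triple becomes a statically embedded kmem_cache. A minimal before/after sketch of the pattern, mirroring the calls visible in the hunks above ("struct foo" and foo_cache are hypothetical placeholders; the NULL arguments are optional hooks this caller leaves unused):

    /* Old zone interface: a zone created at run time, with a fixed
     * upper bound on its total size baked in at creation. */
    zone_t foo_zone;
    foo_zone = zinit(sizeof(struct foo), 0, 400 * sizeof(struct foo),
                     4096, 0, "foo");
    f = (struct foo *) zalloc(foo_zone);
    zfree(foo_zone, (vm_offset_t) f);

    /* New slab interface, as introduced by this commit: the cache
     * object is statically allocated and grows on demand, so no
     * size cap is passed. */
    struct kmem_cache foo_cache;
    kmem_cache_init(&foo_cache, "foo", sizeof(struct foo), 0,
                    NULL, NULL, NULL, 0);
    f = (struct foo *) kmem_cache_alloc(&foo_cache);
    kmem_cache_free(&foo_cache, (vm_offset_t) f);

This is also why the "zone of"/"zone package" comments become "cache of"/"cache package": only the allocator behind the fixed-size objects changes, not the pmap logic.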
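pmap_map_bd, pmap_remove_range, and pmap_protect all carry the same MACH_PV_PAGETABLES idiom: rather than issuing one hypercall per PTE, updates are queued into a fixed-size array and submitted in batches. A condensed sketch of that loop (the pte/epte bounds and new_value are placeholders; the real code panics with per-call-site messages):

    struct mmu_update update[HYP_BATCH_MMU_UPDATES];
    int i = 0, n;

    for (; pte < epte; pte++) {
            update[i].ptr = kv_to_ma(pte);  /* machine address of the PTE */
            update[i].val = new_value;      /* desired PTE contents */
            i++;
            if (i == HYP_BATCH_MMU_UPDATES) {
                    /* Full batch: hand it to the hypervisor in one call. */
                    hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
                    if (n != i)
                            panic("batched MMU update failed");
                    i = 0;
            }
    }
    if (i > 0) {    /* flush the partial final batch */
            hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
            if (n != i)
                    panic("batched MMU update failed");
    }

The non-Xen side of each #else branch is the plain store (`*pte = 0`, WRITE_PTE, etc.), which needs no batching because it is just a memory write.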
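Both new loop conditions (`la >= VM_MIN_KERNEL_ADDRESS` in the Xen bootstrap and `va >= phystokv(phys_first_addr)` in the kernel page-table setup) are unsigned-wraparound guards. Because the iterator advances a whole page-table span per pass, it can step over the end bound and wrap past zero, after which `va < end` holds again and the loop never terminates. A minimal illustration, assuming a 32-bit vm_offset_t and a 4 MB span:

    vm_offset_t va, start = 0xc0000000U, end = 0xfffff000U;

    /* Unsafe: 0xffc00000 + 0x400000 wraps to 0, and 0 < end holds
     * again, so this loop never exits when end sits inside the
     * last span below the 4 GB boundary. */
    for (va = start; va < end; va += 0x400000)
            /* map one page table */;

    /* Safe: a wrapped va drops below start, ending the loop. */
    for (va = start; va >= start && va < end; va += 0x400000)
            /* map one page table */;

With kernel_virtual_end now derived from VM_KERNEL_MAP_SIZE instead of the old morevm heuristic, the end bound can legitimately sit near the top of the address space, which is what makes the guard necessary.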
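The PAE hunks stop assuming that the bootstrap L3 table points at physically contiguous page directories: instead of indexing base[0] and treating the directories as one flat array, the code now picks the directory for each address via lin2pdpnum() and indexes within it. Schematically (a hypothetical rendering; PDPSHIFT/PDESHIFT/PDEMASK are the gnumach PAE constants, and lin2pdpnum() is assumed to reduce to the shift shown):

    /* Split linear address la into its two upper-level indexes. */
    unsigned pdp = lin2pdpnum(la);            /* ~ la >> PDPSHIFT: which L3 slot */
    pt_entry_t *l2_map = (pt_entry_t *) ptetokv(base[pdp]); /* that slot's page dir */
    l2_map += (la >> PDESHIFT) & PDEMASK;     /* directory entry within it */

The same reasoning drives the pmap_set_page_readonly_init change: the removed linear_pmap trick relied on the bootstrap directories being addressable as one dirbase, which the Xen domain builder does not guarantee.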