author     Richard Braun <rbraun@sceen.net>   2013-08-15 13:21:46 +0200
committer  Richard Braun <rbraun@sceen.net>   2013-08-15 13:21:46 +0200
commit     0733d208470fb79ae09512ef90a846d68c3f2ccc (patch)
tree       096f2ab1c520047468acedf0fa2c97570e1fa25a
parent     dbed15d14210034ada3386b3f12572f72b204a32 (diff)
x86/pmap: another minor naming change
Make it clear that the PTP mapping function is actually intended to map root PTPs only.
-rw-r--r--  arch/x86/machine/pmap.c  44
1 file changed, 22 insertions, 22 deletions
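
For context, the renamed helpers are only ever used in a short map/edit/unmap sequence on a pmap's root page table pages. A minimal sketch of that pattern, using the names visible in this diff (the wrapper function example_set_root_pte and its arguments are hypothetical, added here purely for illustration):

/*
 * Illustrative only: a hypothetical caller following the same
 * map/edit/unmap pattern used by pmap_create() and
 * pmap_enter_ptemap_sync_kernel() in the diff below.
 */
static void
example_set_root_pte(struct pmap *pmap, unsigned int index, pmap_pte_t pte)
{
    struct pmap_tmp_mapping *root_ptp_mapping;
    pmap_pte_t *root_ptp;

    /* Pins the current thread and maps the PMAP_NR_RPTPS root PTPs
       at this CPU's reserved virtual window. */
    root_ptp_mapping = pmap_map_root_ptp(pmap->root_ptp_pa);
    root_ptp = (pmap_pte_t *)root_ptp_mapping->va;

    root_ptp[index] = pte;

    /* Removes the temporary mapping, flushes the affected TLB entries
       and unpins the thread. */
    pmap_unmap_root_ptp(root_ptp_mapping);
}
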
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index a7c58784..506600ef 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -81,7 +81,7 @@ struct pmap_pt_level {
*
* List of users :
* - pmap_zero_mapping (1 page per CPU)
- * - pmap_ptp_mapping (up to PMAP_NR_RPTPS, 1 per CPU)
+ * - pmap_root_ptp_mapping (PMAP_NR_RPTPS pages per CPU)
* - CGA video memory (1 page)
*/
#define PMAP_RESERVED_PAGES (MAX_CPUS \
@@ -97,7 +97,7 @@ struct pmap_tmp_mapping {
};
static struct pmap_tmp_mapping pmap_zero_mappings[MAX_CPUS];
-static struct pmap_tmp_mapping pmap_ptp_mappings[MAX_CPUS];
+static struct pmap_tmp_mapping pmap_root_ptp_mappings[MAX_CPUS];
static struct pmap kernel_pmap_store;
struct pmap *kernel_pmap = &kernel_pmap_store;
@@ -428,8 +428,8 @@ pmap_bootstrap(void)
mutex_init(&pmap_zero_mappings[i].lock);
pmap_zero_mappings[i].va = pmap_bootalloc(1);
- mutex_init(&pmap_ptp_mappings[i].lock);
- pmap_ptp_mappings[i].va = pmap_bootalloc(PMAP_NR_RPTPS);
+ mutex_init(&pmap_root_ptp_mappings[i].lock);
+ pmap_root_ptp_mappings[i].va = pmap_bootalloc(PMAP_NR_RPTPS);
spinlock_init(&pmap_update_data[i].queue.lock);
list_init(&pmap_update_data[i].queue.cpu_requests);
@@ -557,42 +557,42 @@ pmap_zero_page(phys_addr_t pa)
}
static struct pmap_tmp_mapping *
-pmap_map_ptp(phys_addr_t pa)
+pmap_map_root_ptp(phys_addr_t pa)
{
- struct pmap_tmp_mapping *ptp_mapping;
+ struct pmap_tmp_mapping *root_ptp_mapping;
unsigned long va;
unsigned int i, offset;
thread_pin();
- ptp_mapping = &pmap_ptp_mappings[cpu_id()];
- mutex_lock(&ptp_mapping->lock);
+ root_ptp_mapping = &pmap_root_ptp_mappings[cpu_id()];
+ mutex_lock(&root_ptp_mapping->lock);
for (i = 0; i < PMAP_NR_RPTPS; i++) {
offset = i * PAGE_SIZE;
- va = ptp_mapping->va + offset;
+ va = root_ptp_mapping->va + offset;
pmap_kenter(va, pa + offset, VM_PROT_READ | VM_PROT_WRITE);
cpu_tlb_flush_va(va);
}
- return ptp_mapping;
+ return root_ptp_mapping;
}
static void
-pmap_unmap_ptp(struct pmap_tmp_mapping *ptp_mapping)
+pmap_unmap_root_ptp(struct pmap_tmp_mapping *root_ptp_mapping)
{
unsigned long va;
unsigned int i;
assert(thread_pinned());
- mutex_assert_locked(&ptp_mapping->lock);
+ mutex_assert_locked(&root_ptp_mapping->lock);
- va = ptp_mapping->va;
+ va = root_ptp_mapping->va;
pmap_kremove(va, va + (PMAP_NR_RPTPS * PAGE_SIZE));
for (i = 0; i < PMAP_NR_RPTPS; i++)
cpu_tlb_flush_va(va + (i * PAGE_SIZE));
- mutex_unlock(&ptp_mapping->lock);
+ mutex_unlock(&root_ptp_mapping->lock);
thread_unpin();
}
@@ -864,7 +864,7 @@ int
pmap_create(struct pmap **pmapp)
{
const struct pmap_pt_level *pt_level;
- struct pmap_tmp_mapping *ptp_mapping;
+ struct pmap_tmp_mapping *root_ptp_mapping;
struct vm_page *root_pages;
struct pmap *pmap;
pmap_pte_t *pt, *kpt;
@@ -917,8 +917,8 @@ pmap_create(struct pmap **pmapp)
/* The pmap list lock also protects the shared root page table entries */
mutex_lock(&pmap_list_lock);
- ptp_mapping = pmap_map_ptp(pmap->root_ptp_pa);
- pt = (pmap_pte_t *)ptp_mapping->va;
+ root_ptp_mapping = pmap_map_root_ptp(pmap->root_ptp_pa);
+ pt = (pmap_pte_t *)root_ptp_mapping->va;
memset(pt, 0, index * sizeof(pmap_pte_t));
index += PMAP_NR_RPTPS;
@@ -932,7 +932,7 @@ pmap_create(struct pmap **pmapp)
pt[index] = (pa | PMAP_PTE_RW | PMAP_PTE_P) & pt_level->mask;
}
- pmap_unmap_ptp(ptp_mapping);
+ pmap_unmap_root_ptp(root_ptp_mapping);
list_insert_tail(&pmap_list, &pmap->node);
mutex_unlock(&pmap_list_lock);
@@ -953,7 +953,7 @@ static void
pmap_enter_ptemap_sync_kernel(unsigned int index)
{
const struct pmap_pt_level *pt_level;
- struct pmap_tmp_mapping *ptp_mapping;
+ struct pmap_tmp_mapping *root_ptp_mapping;
struct pmap *pmap, *current;
pmap_pte_t *root_ptp;
@@ -966,11 +966,11 @@ pmap_enter_ptemap_sync_kernel(unsigned int index)
if (pmap == current)
continue;
- ptp_mapping = pmap_map_ptp(pmap->root_ptp_pa);
- root_ptp = (pmap_pte_t *)ptp_mapping->va;
+ root_ptp_mapping = pmap_map_root_ptp(pmap->root_ptp_pa);
+ root_ptp = (pmap_pte_t *)root_ptp_mapping->va;
assert(root_ptp[index] == 0);
root_ptp[index] = pt_level->ptes[index];
- pmap_unmap_ptp(ptp_mapping);
+ pmap_unmap_root_ptp(root_ptp_mapping);
}
mutex_unlock(&pmap_list_lock);