author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-02-08 04:22:04 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-08 09:22:42 -0800
commit     2f569afd9ced9ebec9a6eb3dbf6f83429be0a7b4 (patch)
tree       23a31763887d9505e62e9d7cc8ec2fa4b86bd380 /mm
parent     13214adf738abc92b0a00c0763fd3be79eebaa7c (diff)
CONFIG_HIGHPTE vs. sub-page page tables.
Background: I've implemented 1K/2K page tables for s390. These sub-page page tables are required to properly support the s390 virtualization instruction with KVM. The SIE instruction requires that the page tables have 256 page table entries (pte) followed by 256 page status table entries (pgste). The pgstes are only required if the process is using the SIE instruction. The pgstes are updated by the hardware and by the hypervisor for a number of reasons, one of them being dirty and reference bit tracking. To avoid wasting memory, the standard pte table allocation should return 1K/2K (31/64 bit) and 2K/4K if the process is using SIE.

Problem: Page size on s390 is 4K, page table size is 1K or 2K. That means the s390 version of pte_alloc_one cannot return a pointer to a struct page. Trouble is that with the CONFIG_HIGHPTE feature on x86 pte_alloc_one cannot return a pointer to a pte either, since that would require more than 32 bits for the return value of pte_alloc_one (and the pte * would not be accessible since it's not kmapped).

Solution: The only solution I found to this dilemma is a new typedef: a pgtable_t. For s390 pgtable_t will be a (pte *) - to be introduced with a later patch. For everybody else it will be a (struct page *). The additional problem with the initialization of the ptl lock and the NR_PAGETABLE accounting is solved with a constructor pgtable_page_ctor and a destructor pgtable_page_dtor. The page table allocation and free functions need to call these two whenever a page table page is allocated or freed. pmd_populate will get a pgtable_t instead of a struct page pointer. To get the pgtable_t back from a pmd entry that has been installed with pmd_populate a new function pmd_pgtable is added. It replaces the pmd_page call in free_pte_range and apply_to_pte_range.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
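For reference, the convention the message describes looks roughly like this. The ctor/dtor bodies below follow the description above; the pte_alloc_one()/pte_free() pair is a simplified, hypothetical struct-page-based arch implementation, not code from this commit (the actual include/linux/mm.h and per-arch changes fall outside the mm/ diffstat shown below):

typedef struct page *pgtable_t;   /* pte_t * on s390, struct page * elsewhere */

static inline void pgtable_page_ctor(struct page *page)
{
        pte_lock_init(page);                     /* set up the split pte lock */
        inc_zone_page_state(page, NR_PAGETABLE); /* account the pte page */
}

static inline void pgtable_page_dtor(struct page *page)
{
        pte_lock_deinit(page);
        dec_zone_page_state(page, NR_PAGETABLE);
}

/* Hypothetical struct-page-based arch allocation/free pair: */
static pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (page)
                pgtable_page_ctor(page);
        return page;
}

static void pte_free(struct mm_struct *mm, pgtable_t page)
{
        pgtable_page_dtor(page);
        __free_page(page);
}

With the ptl initialization and NR_PAGETABLE accounting centralized in the ctor/dtor, the pte_lock_init()/pte_lock_deinit() and inc/dec_zone_page_state() calls can drop out of the generic code, which is exactly what the mm/memory.c hunks below do.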
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c   32
-rw-r--r--  mm/vmalloc.c   2
2 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 153a54b2013..e5628a5fd67 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -134,11 +134,9 @@ void pmd_clear_bad(pmd_t *pmd)
  */
 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
-        struct page *page = pmd_page(*pmd);
+        pgtable_t token = pmd_pgtable(*pmd);
         pmd_clear(pmd);
-        pte_lock_deinit(page);
-        pte_free_tlb(tlb, page);
-        dec_zone_page_state(page, NR_PAGETABLE);
+        pte_free_tlb(tlb, token);
         tlb->mm->nr_ptes--;
 }
 
@@ -309,21 +307,19 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-        struct page *new = pte_alloc_one(mm, address);
+        pgtable_t new = pte_alloc_one(mm, address);
         if (!new)
                 return -ENOMEM;
 
-        pte_lock_init(new);
         spin_lock(&mm->page_table_lock);
-        if (pmd_present(*pmd)) {        /* Another has populated it */
-                pte_lock_deinit(new);
-                pte_free(mm, new);
-        } else {
+        if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                 mm->nr_ptes++;
-                inc_zone_page_state(new, NR_PAGETABLE);
                 pmd_populate(mm, pmd, new);
+                new = NULL;
         }
         spin_unlock(&mm->page_table_lock);
+        if (new)
+                pte_free(mm, new);
         return 0;
 }
 
@@ -334,11 +330,13 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
                 return -ENOMEM;
 
         spin_lock(&init_mm.page_table_lock);
-        if (pmd_present(*pmd))          /* Another has populated it */
-                pte_free_kernel(&init_mm, new);
-        else
+        if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                 pmd_populate_kernel(&init_mm, pmd, new);
+                new = NULL;
+        }
         spin_unlock(&init_mm.page_table_lock);
+        if (new)
+                pte_free_kernel(&init_mm, new);
         return 0;
 }
 
@@ -1390,7 +1388,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
         pte_t *pte;
         int err;
-        struct page *pmd_page;
+        pgtable_t token;
         spinlock_t *uninitialized_var(ptl);
 
         pte = (mm == &init_mm) ?
@@ -1401,10 +1399,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
         BUG_ON(pmd_huge(*pmd));
 
-        pmd_page = pmd_page(*pmd);
+        token = pmd_pgtable(*pmd);
 
         do {
-                err = fn(pte, pmd_page, addr, data);
+                err = fn(pte, token, addr, data);
                 if (err)
                         break;
         } while (pte++, addr += PAGE_SIZE, addr != end);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0536dde139d..950c0be9ca8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -820,7 +820,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
 
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
         /* apply_to_page_range() does all the hard work. */
         return 0;
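For context, the callback signature change above tracks the pte_fn_t typedef, which this patch switches from taking a struct page * to a pgtable_t in include/linux/mm.h (also outside the mm/ diffstat). A sketch of the resulting interface; count_pte() and count_present() are hypothetical illustrations, not part of this commit:

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);

/* Hypothetical callback: count present ptes in a range walked by
 * apply_to_page_range(). Simple callbacks usually ignore the token;
 * it identifies the page table page the pte belongs to.
 */
static int count_pte(pte_t *pte, pgtable_t token, unsigned long addr,
                     void *data)
{
        unsigned long *count = data;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present(struct mm_struct *mm, unsigned long addr,
                                   unsigned long size)
{
        unsigned long count = 0;

        /* Note: apply_to_page_range() allocates missing page tables as
         * it walks, so this is illustrative rather than a pure query.
         */
        if (apply_to_page_range(mm, addr, size, count_pte, &count))
                return 0;
        return count;
}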