 fs/proc/page.c          |    1 +
 include/linux/hugetlb.h |    7 ++++
 mm/hugetlb.c            |   98 ++++++++++++++++++++++++++++------------------
 mm/internal.h           |    5 +--
 mm/page_alloc.c         |   17 --------
 5 files changed, 73 insertions(+), 55 deletions(-)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index e9983837d08..38dd88b7ce8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -6,6 +6,7 @@
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include "internal.h"
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 03be7f29ca0..a05a5ef3339 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -11,6 +11,8 @@
struct ctl_table;
+int PageHuge(struct page *page);
+
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_HUGETLB;
@@ -61,6 +63,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
#else /* !CONFIG_HUGETLB_PAGE */
+static inline int PageHuge(struct page *page)
+{
+	return 0;
+}
+
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
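
The header wires PageHuge() up through the usual CONFIG stub convention: the real definition lives in mm/hugetlb.c, while !CONFIG_HUGETLB_PAGE builds get a static inline that returns 0, so call sites need no #ifdef and the compiler can discard dead branches. A minimal userspace sketch of that pattern (HAVE_FEATURE and feature_active() are made-up names for illustration, not kernel API):

/* Model of the CONFIG-stub pattern: one interface for callers; when
 * the feature is compiled out, the stub folds the branch away. */
#include <stdio.h>

#ifdef HAVE_FEATURE
int feature_active(int flags) { return flags & 0x1; }	  /* real version */
#else
static inline int feature_active(int flags) { return 0; } /* stub */
#endif

int main(void)
{
	/* Same call site compiles with or without -DHAVE_FEATURE. */
	printf("active: %d\n", feature_active(0x3));
	return 0;
}
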
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7b9b6015b2e..a56e6f3ce97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -578,41 +578,6 @@ static void free_huge_page(struct page *page)
	hugetlb_put_quota(mapping, 1);
}
-/*
- * Increment or decrement surplus_huge_pages. Keep node-specific counters
- * balanced by operating on them in a round-robin fashion.
- * Returns 1 if an adjustment was made.
- */
-static int adjust_pool_surplus(struct hstate *h, int delta)
-{
-	static int prev_nid;
-	int nid = prev_nid;
-	int ret = 0;
-
-	VM_BUG_ON(delta != -1 && delta != 1);
-	do {
-		nid = next_node(nid, node_online_map);
-		if (nid == MAX_NUMNODES)
-			nid = first_node(node_online_map);
-
-		/* To shrink on this node, there must be a surplus page */
-		if (delta < 0 && !h->surplus_huge_pages_node[nid])
-			continue;
-		/* Surplus cannot exceed the total number of pages */
-		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
-						h->nr_huge_pages_node[nid])
-			continue;
-
-		h->surplus_huge_pages += delta;
-		h->surplus_huge_pages_node[nid] += delta;
-		ret = 1;
-		break;
-	} while (nid != prev_nid);
-
-	prev_nid = nid;
-	return ret;
-}
-
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
@@ -623,6 +588,34 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
	put_page(page); /* free it into the hugepage allocator */
}
+static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+{
+	int i;
+	int nr_pages = 1 << order;
+	struct page *p = page + 1;
+
+	/* we rely on prep_new_huge_page to set the destructor */
+	set_compound_order(page, order);
+	__SetPageHead(page);
+	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+		__SetPageTail(p);
+		p->first_page = page;
+	}
+}
+
+int PageHuge(struct page *page)
+{
+	compound_page_dtor *dtor;
+
+	if (!PageCompound(page))
+		return 0;
+
+	page = compound_head(page);
+	dtor = get_compound_page_dtor(page);
+
+	return dtor == free_huge_page;
+}
+
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
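
PageHuge() identifies a hugetlb page by comparing the compound page's destructor against free_huge_page, so the function pointer doubles as a type tag and no page flag is consumed. This is also why prep_compound_gigantic_page() above leaves the destructor to prep_new_huge_page() instead of setting free_compound_page the way the old mm/page_alloc.c copy did. A minimal userspace model of the destructor-as-tag idea (struct obj, free_huge() and obj_is_huge() are invented names, not kernel API):

/* Model of destructor-as-type-tag: each object carries a destructor
 * pointer, and a predicate identifies the object's kind by comparing
 * that pointer against the known destructor. */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	void (*dtor)(struct obj *);
};

static void free_plain(struct obj *o) { free(o); }
static void free_huge(struct obj *o)  { free(o); }

static int obj_is_huge(struct obj *o)
{
	return o->dtor == free_huge;	/* identity of the dtor is the tag */
}

int main(void)
{
	struct obj *a = malloc(sizeof(*a));
	struct obj *b = malloc(sizeof(*b));

	a->dtor = free_plain;
	b->dtor = free_huge;
	printf("a huge? %d  b huge? %d\n", obj_is_huge(a), obj_is_huge(b));
	a->dtor(a);
	b->dtor(b);
	return 0;
}

The C standard guarantees distinct functions have distinct addresses, so the pointer comparison is a reliable discriminator as long as free_huge_page is the unique destructor for hugetlb pages.
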
@@ -1140,6 +1133,41 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
}
#endif
+/*
+ * Increment or decrement surplus_huge_pages. Keep node-specific counters
+ * balanced by operating on them in a round-robin fashion.
+ * Returns 1 if an adjustment was made.
+ */
+static int adjust_pool_surplus(struct hstate *h, int delta)
+{
+	static int prev_nid;
+	int nid = prev_nid;
+	int ret = 0;
+
+	VM_BUG_ON(delta != -1 && delta != 1);
+	do {
+		nid = next_node(nid, node_online_map);
+		if (nid == MAX_NUMNODES)
+			nid = first_node(node_online_map);
+
+		/* To shrink on this node, there must be a surplus page */
+		if (delta < 0 && !h->surplus_huge_pages_node[nid])
+			continue;
+		/* Surplus cannot exceed the total number of pages */
+		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
+						h->nr_huge_pages_node[nid])
+			continue;
+
+		h->surplus_huge_pages += delta;
+		h->surplus_huge_pages_node[nid] += delta;
+		ret = 1;
+		break;
+	} while (nid != prev_nid);
+
+	prev_nid = nid;
+	return ret;
+}
+
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
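
adjust_pool_surplus() resumes the walk one node past the last node it touched, skips nodes that cannot absorb the delta (nothing to shrink, or already all-surplus when growing), and gives up after one full lap; restarting from prev_nid each time is what keeps the per-node counters roughly balanced. A standalone userspace model of that round-robin walk (node count and per-node capacities are made up):

/* Model of the round-robin walk: start after the previously used node,
 * skip nodes that cannot take the adjustment, stop after one full lap. */
#include <stdio.h>

#define NR_NODES 4

static int surplus[NR_NODES];
static int nr_pages[NR_NODES] = { 2, 2, 2, 2 };
static int prev_nid;

static int adjust_surplus(int delta)	/* delta is +1 or -1 */
{
	int nid = prev_nid;

	do {
		nid = (nid + 1) % NR_NODES;	/* next "online" node */
		if (delta < 0 && !surplus[nid])
			continue;		/* nothing to shrink here */
		if (delta > 0 && surplus[nid] >= nr_pages[nid])
			continue;		/* node already all-surplus */
		surplus[nid] += delta;
		prev_nid = nid;
		return 1;
	} while (nid != prev_nid);
	return 0;				/* full lap, no eligible node */
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		adjust_surplus(+1);
	for (int n = 0; n < NR_NODES; n++)	/* prints 1 2 2 1 */
		printf("node %d surplus %d\n", n, surplus[n]);
	return 0;
}
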
diff --git a/mm/internal.h b/mm/internal.h
index 4b1672a8cf7..b4ac332e807 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -16,9 +16,6 @@
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
-extern void prep_compound_page(struct page *page, unsigned long order);
-extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
-
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
@@ -51,6 +48,8 @@ extern void putback_lru_page(struct page *page);
*/
extern unsigned long highest_memmap_pfn;
extern void __free_pages_bootmem(struct page *page, unsigned int order);
+extern void prep_compound_page(struct page *page, unsigned long order);
+
/*
* function for dealing with page's order in buddy system.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8ca06d87dc1..131655cdb6b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -300,23 +300,6 @@ void prep_compound_page(struct page *page, unsigned long order)
	}
}
-#ifdef CONFIG_HUGETLBFS
-void prep_compound_gigantic_page(struct page *page, unsigned long order)
-{
-	int i;
-	int nr_pages = 1 << order;
-	struct page *p = page + 1;
-
-	set_compound_page_dtor(page, free_compound_page);
-	set_compound_order(page, order);
-	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-		__SetPageTail(p);
-		p->first_page = page;
-	}
-}
-#endif
-
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
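
For context on what moved: prep_compound_gigantic_page() marks the first page as head and points every tail page back at it through first_page, so compound_head() can resolve any constituent page to its head; mem_map_next() is needed because a gigantic page's mem_map may not be virtually contiguous. A userspace model of that head/tail linking (struct fake_page and plain array indexing stand in for struct page and mem_map_next()):

/* Model of compound head/tail linking: the first element is the head,
 * every tail stores a back-pointer, and a compound_head()-style helper
 * resolves any element to the head. */
#include <stdio.h>

struct fake_page {
	int is_head;
	struct fake_page *first_page;	/* tail -> head back-pointer */
};

static void prep_compound(struct fake_page *page, unsigned int order)
{
	int nr_pages = 1 << order;

	page->is_head = 1;
	page->first_page = page;	/* head points at itself, for tidiness */
	for (int i = 1; i < nr_pages; i++) {
		page[i].is_head = 0;
		page[i].first_page = page;
	}
}

static struct fake_page *compound_head(struct fake_page *p)
{
	return p->is_head ? p : p->first_page;
}

int main(void)
{
	struct fake_page pages[8];	/* an order-3 "compound page" */

	prep_compound(pages, 3);
	printf("head of pages[5] is pages[%ld]\n",
	       (long)(compound_head(&pages[5]) - pages));	/* prints 0 */
	return 0;
}
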