author		David Hildenbrand <david@redhat.com>	2025-07-14 09:16:51 -0400
committer	Andrew Morton <akpm@linux-foundation.org>	2025-07-24 19:12:35 -0700
commit		92c99fc614737eaa99125283c306d2ebb13b101a (patch)
tree		3caec5e20db6388ae1a3391fc7b78aea1ae06ca4
parent		8678d1faf1d4c9c6a87237203fc23c9389e8f143 (diff)
mm/memory: introduce is_huge_zero_pfn() and use it in vm_normal_page_pmd()
Patch series "mm: introduce snapshot_page()", v3. This series introduces snapshot_page(), a helper function that can be used to create a snapshot of a struct page and its associated struct folio. This function is intended to help callers with a consistent view of a a folio while reducing the chance of encountering partially updated or inconsistent state, such as during folio splitting which could lead to crashes and BUG_ON()s being triggered. This patch (of 4): Let's avoid working with the PMD when not required. If vm_normal_page_pmd() would be called on something that is not a present pmd, it would already be a bug (pfn possibly garbage). While at it, let's support passing in any pfn covered by the huge zero folio by masking off PFN bits -- which should be rather cheap. Link: https://lkml.kernel.org/r/cover.1752499009.git.luizcap@redhat.com Link: https://lkml.kernel.org/r/4940826e99f0c709a7cf7beb94f53288320aea5a.1752499009.git.luizcap@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Oscar Salvador <osalvador@suse.de> Signed-off-by: Luiz Capitulino <luizcap@redhat.com> Reviewed-by: Shivank Garg <shivankg@amd.com> Tested-by: Harry Yoo <harry.yoo@oracle.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: SeongJae Park <sj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	include/linux/huge_mm.h	12
-rw-r--r--	mm/memory.c	2
2 files changed, 12 insertions, 2 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 4d5bb67dc4eca..7748489fde1b7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -482,9 +482,14 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
return READ_ONCE(huge_zero_folio) == folio;
}
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
+}
+
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
+ return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
@@ -632,6 +637,11 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
return false;
}
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
return false;
diff --git a/mm/memory.c b/mm/memory.c
index b4fb559dd0c62..92fd18a5d8d1f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -668,7 +668,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
}
}
- if (is_huge_zero_pmd(pmd))
+ if (is_huge_zero_pfn(pfn))
return NULL;
if (unlikely(pfn > highest_memmap_pfn))
return NULL;
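
For reference, below is a minimal standalone sketch of the masking trick the new helper relies on: rounding a pfn down to its PMD-aligned head pfn before comparing it against the huge zero folio's pfn, so any tail pfn inside the huge zero folio matches as well.  The constants and the head pfn value here are assumptions chosen for illustration (4 KiB base pages and 2 MiB PMDs, i.e. HPAGE_PMD_NR = 512, and a made-up huge_zero_pfn); they are not taken from the patch and this is not the in-kernel definition.

	/* Standalone illustration of the masking used by is_huge_zero_pfn(). */
	#include <stdbool.h>
	#include <stdio.h>

	#define HPAGE_PMD_NR	512UL		/* assumed: 2 MiB PMD / 4 KiB pages */

	/* Hypothetical, PMD-aligned head pfn of the huge zero folio. */
	static unsigned long huge_zero_pfn = 0x100000;

	/* Matches any pfn inside the huge zero folio, not only the head pfn. */
	static bool is_huge_zero_pfn(unsigned long pfn)
	{
		/* Round the pfn down to its PMD-aligned head pfn and compare. */
		return huge_zero_pfn == (pfn & ~(HPAGE_PMD_NR - 1));
	}

	int main(void)
	{
		printf("%d\n", is_huge_zero_pfn(0x100000));		/* head pfn -> 1 */
		printf("%d\n", is_huge_zero_pfn(0x100000 + 13));	/* tail pfn -> 1 */
		printf("%d\n", is_huge_zero_pfn(0x100000 + 512));	/* next PMD -> 0 */
		return 0;
	}

Because the huge zero folio's head pfn is PMD-aligned, masking off the low bits of any pfn within it yields that head pfn, which is why the caller in vm_normal_page_pmd() can pass the already-computed pfn instead of re-deriving it from the pmd.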